code | repo_name | path | language | license | size
---|---|---|---|---|---
from puya import log
from puya.ir import models
from puya.ir.visitor_mem_replacer import MemoryReplacer
logger = log.get_logger(__name__)
def convert_to_cssa(sub: models.Subroutine) -> None:
logger.debug("Converting to CSSA")
max_versions = dict[str, int]()
for reg in sub.get_assigned_registers():
max_versions[reg.name] = max(max_versions.get(reg.name, 0), reg.version)
def make_prime(mem: models.Register) -> models.Register:
max_versions[mem.name] += 1
return models.Register(
source_location=mem.source_location,
ir_type=mem.ir_type,
name=mem.name,
version=max_versions[mem.name],
)
block_exit_copies = dict[models.BasicBlock, list[tuple[models.Register, models.Register]]]()
for phi_block in sub.body:
prime_phis = list[models.Phi]()
prime_copies = list[tuple[models.Register, models.Register]]()
for phi in phi_block.phis:
if not phi.args:
phi_block.ops.insert(
0,
models.Assignment(
targets=[phi.register],
source=models.Undefined(
ir_type=phi.ir_type, source_location=phi.source_location
),
source_location=phi.source_location,
),
)
else:
prime_args = list[models.PhiArgument]()
for phi_arg in phi.args:
prime_arg_reg = make_prime(phi_arg.value)
prime_arg = models.PhiArgument(
value=prime_arg_reg,
through=phi_arg.through,
)
prime_args.append(prime_arg)
# insert copy to prime arg at end of predecessor
block_exit_copies.setdefault(phi_arg.through, []).append(
(prime_arg_reg, phi_arg.value)
)
phi_prime = models.Phi(register=make_prime(phi.register), args=prime_args)
prime_phis.append(phi_prime)
prime_copies.append((phi.register, phi_prime.register))
phi_block.phis = prime_phis
if prime_copies:
phi_block.ops.insert(0, _make_copy_assignment(prime_copies))
for block, copies in block_exit_copies.items():
block.ops.append(_make_copy_assignment(copies))
def _make_copy_assignment(
copies: list[tuple[models.Register, models.Register]],
) -> models.Assignment:
if len(copies) == 1:
((dst, src),) = copies
targets = [dst]
source: models.ValueProvider = src
else:
targets_tup, sources_tup = zip(*copies, strict=True)
targets = list(targets_tup)
sources_list = list(sources_tup)
source = models.ValueTuple(source_location=None, values=sources_list)
return models.Assignment(
targets=targets,
source=source,
source_location=None,
)
def destructure_cssa(sub: models.Subroutine) -> None:
logger.debug("Removing Phi nodes")
# Once the sub is in CSSA, destructuring is trivial.
# All variables involved in a Phi can be given the same name, and the Phi can be removed.
replacements = dict[models.Register, models.Register]()
for block in sub.body:
phis = block.phis
block.phis = []
replacements.update({arg.value: phi.register for phi in phis for arg in phi.args})
MemoryReplacer.apply(sub.body, replacements=replacements)
| algorandfoundation/puya | src/puya/ir/destructure/remove_phi.py | Python | NOASSERTION | 3,583 |
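The two functions in remove_phi.py together implement out-of-SSA translation: convert_to_cssa splits every phi into parallel copies (at the end of each predecessor and at the top of the phi's own block), after which destructure_cssa can simply drop the phi nodes. A minimal driving sketch, assuming a models.Subroutine named `sub` that is already in SSA form (the wrapper function is illustrative, not part of the module):

from puya.ir import models
from puya.ir.destructure.remove_phi import convert_to_cssa, destructure_cssa

def remove_phis(sub: models.Subroutine) -> None:
    # insert the parallel copies that put the IR into conventional SSA (CSSA) form
    convert_to_cssa(sub)
    # the copies now carry values between blocks, so the phi nodes can be dropped
    destructure_cssa(sub)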
from puya.ir.avm_ops import AVMOp
from puya.ir.models import Intrinsic, Value
from puya.ir.types_ import IRType
from puya.parse import SourceLocation
def select(
*, condition: Value, false: Value, true: Value, type_: IRType, source_location: SourceLocation
) -> Intrinsic:
return Intrinsic(
op=AVMOp.select,
args=[
false,
true,
condition,
],
types=[type_],
source_location=source_location,
)
| algorandfoundation/puya | src/puya/ir/intrinsic_factory.py | Python | NOASSERTION | 482 |
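The argument order in the select factory mirrors the AVM select opcode, which pops A, B and C from the stack and pushes B when C is non-zero, otherwise A; the factory therefore emits args as [false, true, condition]. A hedged usage sketch, where flag, if_set, if_clear and loc are assumed to be existing Value/SourceLocation objects:

result = select(
    condition=flag,       # uint64-backed Value (C)
    true=if_set,          # pushed when the condition is non-zero (B)
    false=if_clear,       # pushed when the condition is zero (A)
    type_=IRType.uint64,
    source_location=loc,
)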
import contextlib
import copy
import typing
from collections import defaultdict
from collections.abc import Collection, Iterator
from pathlib import Path
from immutabledict import immutabledict
from puya import artifact_metadata, log
from puya.avm import AVMType
from puya.awst import nodes as awst_nodes
from puya.awst.awst_traverser import AWSTTraverser
from puya.awst.function_traverser import FunctionTraverser
from puya.awst.serialize import awst_from_json
from puya.context import CompileContext
from puya.ir import arc4_router
from puya.ir._contract_metadata import build_contract_metadata
from puya.ir._utils import make_subroutine
from puya.ir.arc4_router import AWSTContractMethodSignature
from puya.ir.builder.main import FunctionIRBuilder
from puya.ir.context import IRBuildContext
from puya.ir.models import Contract, LogicSignature, ModuleArtifact, Program, Subroutine
from puya.ir.optimize.dead_code_elimination import remove_unused_subroutines
from puya.ir.types_ import wtype_to_ir_type
from puya.program_refs import ProgramKind
from puya.utils import StableSet, attrs_extend, coalesce, set_add, set_remove
logger = log.get_logger(__name__)
CalleesLookup: typing.TypeAlias = defaultdict[awst_nodes.Function, set[awst_nodes.Function]]
_EMBEDDED_LIB = Path(__file__).parent / "_puya_lib.awst.json"
def awst_to_ir(context: CompileContext, awst: awst_nodes.AWST) -> Iterator[ModuleArtifact]:
compilation_set = _CompilationSetCollector.collect(context, awst)
ir_ctx = _build_subroutines(context, awst)
for node in compilation_set:
artifact_ctx = ir_ctx.for_root(node)
with artifact_ctx.log_exceptions():
if isinstance(node, awst_nodes.LogicSignature):
yield _build_logic_sig_ir(artifact_ctx, node)
else:
yield _build_contract_ir(artifact_ctx, node)
def _build_subroutines(ctx: CompileContext, awst: awst_nodes.AWST) -> IRBuildContext:
embedded_awst = awst_from_json(_EMBEDDED_LIB.read_text())
embedded_subroutines = {
node: make_subroutine(node, allow_implicits=False)
for node in embedded_awst
if isinstance(node, awst_nodes.Subroutine)
}
user_subroutines = {
node: make_subroutine(node, allow_implicits=True)
for node in awst
if isinstance(node, awst_nodes.Subroutine)
}
ir_ctx = attrs_extend(
IRBuildContext,
ctx,
subroutines={**embedded_subroutines, **user_subroutines},
awst=[*embedded_awst, *awst],
embedded_funcs_lookup={func.id: sub for func, sub in embedded_subroutines.items()},
)
for func, sub in ir_ctx.subroutines.items():
FunctionIRBuilder.build_body(ir_ctx, function=func, subroutine=sub)
return ir_ctx
class _CompilationSetCollector(AWSTTraverser):
def __init__(self, awst: awst_nodes.AWST, *, explicit_compilation_set: StableSet[str]) -> None:
super().__init__()
self._remaining_explicit_set: typing.Final = explicit_compilation_set
self.compilation_set: typing.Final = dict[
str, awst_nodes.Contract | awst_nodes.LogicSignature
]()
self._nodes_by_id: typing.Final = immutabledict[str, awst_nodes.RootNode](
{n.id: n for n in awst}
)
@property
def unresolved_explict_ids(self) -> Collection[str]:
return self._remaining_explicit_set
@classmethod
def collect(
cls, context: CompileContext, awst: awst_nodes.AWST
) -> Collection[awst_nodes.Contract | awst_nodes.LogicSignature]:
collector = cls(
awst, explicit_compilation_set=StableSet.from_iter(context.compilation_set)
)
for node in awst:
node.accept(collector)
for unresolved_id in collector.unresolved_explict_ids:
logger.error(f"unable to resolve compilation artifact '{unresolved_id}'")
return collector.compilation_set.values()
def visit_compiled_contract(self, compiled_contract: awst_nodes.CompiledContract) -> None:
super().visit_compiled_contract(compiled_contract)
node = self._nodes_by_id.get(compiled_contract.contract)
match node:
case awst_nodes.Contract() as contract:
self._visit_contract_or_lsig(contract, reference=True)
case None:
logger.error(
"unable to resolve contract reference",
location=compiled_contract.source_location,
)
case _:
logger.error(
"reference is not a contract", location=compiled_contract.source_location
)
def visit_compiled_logicsig(self, compiled_lsig: awst_nodes.CompiledLogicSig) -> None:
super().visit_compiled_logicsig(compiled_lsig)
node = self._nodes_by_id.get(compiled_lsig.logic_sig)
match node:
case awst_nodes.LogicSignature() as lsig:
self._visit_contract_or_lsig(lsig, reference=True)
case None:
logger.error(
"unable to resolve logic signature reference",
location=compiled_lsig.source_location,
)
case _:
logger.error(
"reference is not a logic signature", location=compiled_lsig.source_location
)
def visit_contract(self, contract: awst_nodes.Contract) -> None:
return self._visit_contract_or_lsig(contract)
def visit_logic_signature(self, lsig: awst_nodes.LogicSignature) -> None:
return self._visit_contract_or_lsig(lsig)
def _visit_contract_or_lsig(
self,
node: awst_nodes.Contract | awst_nodes.LogicSignature,
*,
reference: bool = False,
) -> None:
if node.id in self.compilation_set:
return # already visited
direct = set_remove(self._remaining_explicit_set, node.id)
if direct or reference:
self.compilation_set[node.id] = node
match node:
case awst_nodes.Contract():
super().visit_contract(node)
case awst_nodes.LogicSignature():
super().visit_logic_signature(node)
case unexpected:
typing.assert_never(unexpected)
def _build_logic_sig_ir(
ctx: IRBuildContext, logic_sig: awst_nodes.LogicSignature
) -> LogicSignature:
metadata = artifact_metadata.LogicSignatureMetaData(
ref=logic_sig.id,
description=logic_sig.docstring,
name=logic_sig.short_name,
)
avm_version = coalesce(logic_sig.avm_version, ctx.options.target_avm_version)
sig_ir = _make_program(
ctx, logic_sig.program, kind=ProgramKind.logic_signature, avm_version=avm_version
)
return LogicSignature(
program=sig_ir, metadata=metadata, source_location=logic_sig.source_location
)
def _build_contract_ir(ctx: IRBuildContext, contract: awst_nodes.Contract) -> Contract:
metadata, arc4_methods = build_contract_metadata(ctx, contract)
routing_data = {
md: AWSTContractMethodSignature(
target=awst_nodes.ContractMethodTarget(cref=cm.cref, member_name=cm.member_name),
return_type=cm.return_type,
parameter_types=[a.wtype for a in cm.args],
)
for cm, md in arc4_methods.items()
}
arc4_router_awst = arc4_router.create_abi_router(contract, routing_data)
ctx.routers[contract.id] = ctx.subroutines[arc4_router_awst] = make_subroutine(
arc4_router_awst, allow_implicits=False
)
# Build callees list, excluding calls from router.
# Used to determine whether a function should implicitly return mutable reference parameters.
callees = SubroutineCollector.collect(
ctx, contract.approval_program, contract.clear_program, *arc4_methods
)
# construct unique Subroutine objects for each function
# that was referenced through either entry point
for method in contract.methods:
# ABI methods which are only called by the ABI router in the approval_program do not need
# to implicitly return anything as we know our router is not interested in anything but the
# explicit return value.
allow_implicits = bool(callees[method])
# make the empty subroutine first, because functions reference other functions
func_ir = make_subroutine(method, allow_implicits=allow_implicits)
ctx.subroutines[method] = func_ir
# now construct the subroutine IR
for func, sub in ctx.subroutines.items():
if not sub.body: # in case something is pre-built (ie from embedded lib)
FunctionIRBuilder.build_body(ctx, function=func, subroutine=sub)
avm_version = coalesce(contract.avm_version, ctx.options.target_avm_version)
approval_ir = _make_program(
ctx, contract.approval_program, kind=ProgramKind.approval, avm_version=avm_version
)
clear_state_ir = _make_program(
ctx, contract.clear_program, kind=ProgramKind.clear_state, avm_version=avm_version
)
return Contract(
approval_program=approval_ir,
clear_program=clear_state_ir,
metadata=metadata,
source_location=contract.source_location,
)
def _make_program(
ctx: IRBuildContext,
main: awst_nodes.Function,
*,
kind: ProgramKind,
avm_version: int,
) -> Program:
assert not main.args, "main method should not have args"
return_type = wtype_to_ir_type(main.return_type)
assert return_type.avm_type == AVMType.uint64, "main method should return uint64 backed type"
main_sub = Subroutine(
id=main.full_name,
short_name=main.short_name,
parameters=[],
returns=[return_type],
body=[],
inline=False,
source_location=main.source_location,
)
FunctionIRBuilder.build_body(ctx, function=main, subroutine=main_sub)
program = Program(
kind=kind,
main=main_sub,
subroutines=tuple(ctx.subroutines.values()),
avm_version=avm_version,
)
remove_unused_subroutines(program)
program = copy.deepcopy(program)
return program
class SubroutineCollector(FunctionTraverser):
def __init__(self, context: IRBuildContext, callees: CalleesLookup) -> None:
self.context = context
self._seen = StableSet[awst_nodes.Function]()
self.callees = callees
self._func_stack = list[awst_nodes.Function]()
@classmethod
def collect(cls, context: IRBuildContext, *entry_points: awst_nodes.Function) -> CalleesLookup:
callees = CalleesLookup(set)
collector = cls(context, callees)
for start in entry_points:
with collector._enter_func(start): # noqa: SLF001
start.body.accept(collector)
return callees
@typing.override
def visit_subroutine_call_expression(self, expr: awst_nodes.SubroutineCallExpression) -> None:
super().visit_subroutine_call_expression(expr)
callee = self._func_stack[-1]
func = self.context.resolve_function_reference(
expr.target, expr.source_location, caller=callee
)
self.callees[func].add(callee)
if set_add(self._seen, func):
with self._enter_func(func):
func.body.accept(self)
@contextlib.contextmanager
def _enter_func(self, func: awst_nodes.Function) -> Iterator[None]:
self._func_stack.append(func)
try:
yield
finally:
self._func_stack.pop()
| algorandfoundation/puya | src/puya/ir/main.py | Python | NOASSERTION | 11,574 |
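awst_to_ir is the entry point of this module: it collects the compilation set, builds all subroutine bodies, then yields one ModuleArtifact per contract or logic signature. A minimal consuming sketch, assuming compile_context and awst were produced by earlier compiler stages (error handling elided, names as imported in the module above):

for artifact in awst_to_ir(compile_context, awst):
    match artifact:
        case Contract() as contract:
            print("contract, approval entry:", contract.approval_program.main.id)
        case LogicSignature() as lsig:
            print("logicsig, entry:", lsig.program.main.id)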
import abc
import typing
import typing as t
from collections.abc import Iterable, Iterator, Mapping, Sequence, Set
import attrs
from immutabledict import immutabledict
from puya import log
from puya.artifact_metadata import ContractMetaData, LogicSignatureMetaData
from puya.avm import AVMType
from puya.awst.txn_fields import TxnField
from puya.errors import CodeError, InternalError
from puya.ir.avm_ops import AVMOp
from puya.ir.avm_ops_models import ImmediateKind, OpSignature, StackType, Variant
from puya.ir.types_ import AVMBytesEncoding, IRType, stack_type_to_avm_type, stack_type_to_ir_type
from puya.ir.visitor import IRVisitor
from puya.parse import SourceLocation
from puya.program_refs import (
ContractReference,
LogicSigReference,
ProgramKind,
)
from puya.utils import unique
logger = log.get_logger(__name__)
T = t.TypeVar("T")
class Context(t.Protocol):
source_location: SourceLocation | None
class IRVisitable(Context, abc.ABC):
@abc.abstractmethod
def accept(self, visitor: IRVisitor[T]) -> T: ...
def __str__(self) -> str:
from puya.ir.to_text_visitor import ToTextVisitor
return self.accept(ToTextVisitor())
class _Freezable(abc.ABC):
def freeze(self) -> object:
data = self._frozen_data()
hash(data) # check we can hash
if data is self:
return data
return self.__class__, data
@abc.abstractmethod
def _frozen_data(self) -> object: ...
# NOTE! we don't want structural equality in the IR, everything needs to have eq=False
# the workaround to do this (trivial in Python to extend a decorator) AND have mypy
# not complain is ... well, see for yourself:
# https://www.attrs.org/en/stable/extending.html#wrapping-the-decorator
@attrs.define(eq=False)
class ValueProvider(IRVisitable, _Freezable, abc.ABC):
"""A node that provides/produces a value"""
source_location: SourceLocation | None = attrs.field(eq=False)
@property
@abc.abstractmethod
def types(self) -> Sequence[IRType]: ...
@property
@t.final
def atypes(self) -> Sequence[AVMType]:
return tuple(t.avm_type for t in self.types)
@attrs.frozen
class Value(ValueProvider, abc.ABC):
"""Base class for value types.
This is anything that *is* a value, so excludes
value *producers* such as subroutine invocations
"""
ir_type: IRType = attrs.field(repr=lambda x: x.name)
@property
@t.final
def atype(self) -> typing.Literal[AVMType.uint64, AVMType.bytes]:
return self.ir_type.avm_type
@property
def types(self) -> Sequence[IRType]:
return (self.ir_type,)
def _frozen_data(self) -> object:
return self
@attrs.frozen
class Undefined(Value):
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_undefined(self)
class Constant(Value, abc.ABC):
"""Base class for value constants - any value that is known at compile time"""
@attrs.define(eq=False)
class Op(IRVisitable, _Freezable, abc.ABC):
"""Base class for non-control-flow, non-phi operations
This is anything other than a Phi that can appear inside a BasicBlock before the terminal ControlOp.
"""
@attrs.define(eq=False)
class ControlOp(IRVisitable, _Freezable, abc.ABC):
"""Base class for control-flow operations
These appear in a BasicBlock as the terminal node.
"""
source_location: SourceLocation | None
@abc.abstractmethod
def targets(self) -> Sequence["BasicBlock"]:
"""For graph traversal - result could be empty if it's the end of the current graph"""
@property
def unique_targets(self) -> list["BasicBlock"]:
return unique(self.targets())
@attrs.frozen
class Register(Value):
"""A register is an abstraction of "local variable storage".
This could be mapped to either a value on the stack or a scratch slot during code generation.
"""
name: str
version: int
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_register(self)
@property
def local_id(self) -> str:
return f"{self.name}#{self.version}"
@attrs.define(eq=False)
class PhiArgument(IRVisitable):
value: Register
through: "BasicBlock"
source_location: None = attrs.field(default=None, init=False)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_phi_argument(self)
@attrs.define(eq=False)
class Phi(IRVisitable, _Freezable):
"""Phi nodes are oracles that, given a list of other variables, always produce the
one that has been defined in the control flow thus far.
The term phi node comes from the literature on Static Single Assignment
"""
register: Register
args: list[PhiArgument] = attrs.field(factory=list)
source_location: None = attrs.field(default=None, init=False)
@property
def ir_type(self) -> IRType:
return self.register.ir_type
@property
def atype(self) -> AVMType:
return self.ir_type.avm_type
@property
def non_self_args(self) -> Sequence[PhiArgument]:
return tuple(a for a in self.args if a.value != self.register)
def _frozen_data(self) -> object:
return (
self.register.freeze(),
tuple((arg.through.id, arg.value.freeze()) for arg in self.args),
)
@args.validator
def check_args(self, _attribute: object, args: Sequence[PhiArgument]) -> None:
bad_args = [
arg for arg in args if arg.value.ir_type.maybe_avm_type != self.ir_type.maybe_avm_type
]
if bad_args:
raise InternalError(
f"Phi node (register={self.register}) received arguments with unexpected type(s):"
f" {', '.join(map(str, bad_args))}, "
)
seen_blocks = set[BasicBlock]()
for arg in args:
if arg.through in seen_blocks:
raise InternalError(f"Duplicate source to phi node: {arg.through}")
seen_blocks.add(arg.through)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_phi(self)
@attrs.frozen(kw_only=True)
class UInt64Constant(Constant):
value: int
ir_type: IRType = attrs.field(default=IRType.uint64)
teal_alias: str | None = None
@ir_type.validator
def _validate_ir_type(self, _attribute: object, ir_type: IRType) -> None:
if ir_type.maybe_avm_type is not AVMType.uint64:
raise InternalError(
f"Invalid type for UInt64Constant: {ir_type}", self.source_location
)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_uint64_constant(self)
@attrs.frozen(kw_only=True)
class ITxnConstant(Constant):
value: int
ir_type: IRType = attrs.field()
@ir_type.validator
def _validate_ir_type(self, _attribute: object, ir_type: IRType) -> None:
if ir_type not in (IRType.itxn_group_idx, IRType.itxn_field_set):
raise InternalError(f"invalid type for ITxnConstant: {ir_type}", self.source_location)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_itxn_constant(self)
@attrs.frozen
class BigUIntConstant(Constant):
value: int
ir_type: IRType = attrs.field(default=IRType.biguint, init=False)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_biguint_constant(self)
@attrs.frozen
class TemplateVar(Value):
name: str
ir_type: IRType
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_template_var(self)
@attrs.frozen(kw_only=True)
class BytesConstant(Constant):
"""Constant for types that are logically bytes"""
ir_type: IRType = attrs.field(default=IRType.bytes)
encoding: AVMBytesEncoding
value: bytes
@ir_type.validator
def _validate_ir_type(self, _attribute: object, ir_type: IRType) -> None:
if ir_type.maybe_avm_type is not AVMType.bytes:
raise InternalError(f"invalid type for BytesConstant: {ir_type}", self.source_location)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_bytes_constant(self)
@attrs.define
class CompiledContractReference(Value):
artifact: ContractReference
field: TxnField
template_variables: Mapping[str, Value] = attrs.field(converter=immutabledict)
"""
template variable keys here are fully qualified with their appropriate prefix
"""
source_location: SourceLocation | None = attrs.field(eq=False)
program_page: int | None = None # used for approval and clear_state fields
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_compiled_contract_reference(self)
@attrs.define
class CompiledLogicSigReference(Value):
artifact: LogicSigReference
template_variables: Mapping[str, Value] = attrs.field(converter=immutabledict)
source_location: SourceLocation | None = attrs.field(eq=False)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_compiled_logicsig_reference(self)
@attrs.frozen
class AddressConstant(Constant):
"""Constant for address literals"""
ir_type: IRType = attrs.field(default=IRType.bytes, init=False)
value: str = attrs.field()
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_address_constant(self)
@attrs.frozen
class MethodConstant(Constant):
"""Constant for method literals"""
ir_type: IRType = attrs.field(default=IRType.bytes, init=False)
value: str
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_method_constant(self)
@attrs.define(eq=False)
class InnerTransactionField(ValueProvider):
field: str
group_index: Value
is_last_in_group: Value
array_index: Value | None
type: IRType
def _frozen_data(self) -> object:
return self.field, self.group_index, self.is_last_in_group, self.array_index, self.type
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_inner_transaction_field(self)
@property
def types(self) -> Sequence[IRType]:
return (self.type,)
@attrs.define(eq=False)
class Intrinsic(Op, ValueProvider):
"""Any TEAL op (or pseudo-op) that doesn't interrupt control flow, in the "basic block" sense.
refs:
- https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/
- https://developer.algorand.org/docs/get-details/dapps/avm/teal/specification/#assembler-syntax
"""
op: AVMOp
# TODO: consider treating ops with no args (only immediates) as Value types
# e.g. `txn NumAppArgs` or `txna ApplicationArgs 0`
immediates: list[str | int] = attrs.field(factory=list)
args: list[Value] = attrs.field(factory=list)
error_message: str | None = None
"""If the program fails at this op, error_message will be displayed as the reason"""
_types: Sequence[IRType] = attrs.field(converter=tuple[IRType, ...])
@_types.default
def _default_types(self) -> tuple[IRType, ...]:
types = list[IRType]()
for stack_type in self.op_signature.returns:
ir_type = stack_type_to_ir_type(stack_type)
if ir_type is None:
raise InternalError(
f"Intrinsic op {self.op.name} requires return type information",
self.source_location,
)
types.append(ir_type)
return tuple(types)
def _frozen_data(self) -> object:
return self.op, tuple(self.immediates), tuple(self.args), self.error_message
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_intrinsic_op(self)
@property
def types(self) -> Sequence[IRType]:
return self._types
@property
def op_signature(self) -> OpSignature:
return self.op_variant.signature
@property
def op_variant(self) -> Variant:
return self.op.get_variant(self.immediates)
@_types.validator
def _validate_types(self, _attribute: object, types: Sequence[IRType]) -> None:
self._check_stack_types("return", self.op_signature.returns, types)
@args.validator
def _validate_args(self, _attribute: object, args: list[Value]) -> None:
arg_types = [a.ir_type for a in args]
self._check_stack_types("argument", self.op_signature.args, arg_types)
@immediates.validator
def _validate_immediates(self, _attribute: object, immediates: list[int | str]) -> None:
if len(self.op.immediate_types) != len(immediates):
logger.error("Incorrect number of immediates", location=self.source_location)
return
for imm_type, imm in zip(self.op.immediate_types, immediates, strict=True):
match imm_type:
case ImmediateKind.uint8:
if not isinstance(imm, int) or not (0 <= imm <= 255):
logger.critical(
"Invalid immediate, expected value between 0 and 255",
location=self.source_location,
)
case ImmediateKind.arg_enum:
if not isinstance(imm, str):
logger.critical(
"Invalid immediate, expected enum value",
location=self.source_location,
)
case _:
typing.assert_never(imm_type)
def _check_stack_types(
self,
context: str,
expected_types: Sequence[StackType],
source_types: Sequence[IRType],
) -> None:
target_types = [stack_type_to_avm_type(a) for a in expected_types]
if len(target_types) != len(source_types) or not all(
tt & st.avm_type for tt, st in zip(target_types, source_types, strict=True)
):
logger.error(
(
f"Incompatible {context} types on Intrinsic"
f"({self.op} {' '.join(map(str, self.immediates))}): "
f" received = ({', '.join(map(str, source_types))}),"
f" expected = ({', '.join(map(str, target_types))})"
),
location=self.source_location,
)
@attrs.define(eq=False)
class InvokeSubroutine(Op, ValueProvider):
"""Subroutine invocation
opcode: callsub"""
target: "Subroutine"
# TODO: validation for args
args: list[Value]
def _frozen_data(self) -> object:
return self.target.id, tuple(self.args)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_invoke_subroutine(self)
@property
def types(self) -> Sequence[IRType]:
return self.target.returns
@attrs.define(eq=False)
class ValueTuple(ValueProvider):
values: Sequence[Value]
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_value_tuple(self)
@property
def types(self) -> Sequence[IRType]:
return [val.ir_type for val in self.values]
def _frozen_data(self) -> object:
return tuple(self.values)
@attrs.define(eq=False)
class Assignment(Op):
"""
Assignment of either a value or the result of something that produces a value to register(s)
"""
source_location: SourceLocation | None
targets: Sequence[Register] = attrs.field(
validator=[attrs.validators.min_len(1)], converter=tuple[Register, ...]
)
source: ValueProvider = attrs.field()
def _frozen_data(self) -> object:
return tuple(self.targets), self.source.freeze()
@source.validator
def _check_types(self, _attribute: object, source: ValueProvider) -> None:
target_ir_types = [target.ir_type for target in self.targets]
source_ir_types = list(source.types)
if target_ir_types != source_ir_types:
# TODO: need to update some optimiser code and/or
# introduce ReinterpretCast ValueProvider
# here before we can remove this fallback to AVMType here
target_atypes = [tt.maybe_avm_type for tt in target_ir_types]
source_atypes = [st.maybe_avm_type for st in source_ir_types]
if target_atypes != source_atypes:
raise CodeError(
f"Incompatible types on assignment:"
f" source = ({', '.join(map(str, source_ir_types))}),"
f" target = ({', '.join(map(str, target_ir_types))})",
self.source_location,
)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_assignment(self)
@attrs.define(eq=False, str=False, kw_only=True)
class BasicBlock(Context):
"""IR basic block.
Contains a sequence of operations and ends with a terminating ControlOp.
Only the last op can be a ControlOp.
All generated Ops live in basic blocks. Basic blocks determine the
order of evaluation and control flow within a function. A basic
block is always associated with a single Subroutine or program main.
Ops that may terminate the program due to a panic or similar aren't treated as exits.
"""
source_location: SourceLocation # the location that caused the block to be constructed
phis: list[Phi] = attrs.field(factory=list)
ops: list[Op] = attrs.field(factory=list)
terminator: ControlOp | None = attrs.field(default=None)
predecessors: "list[BasicBlock]" = attrs.field(factory=list)
id: int | None = None
label: str | None = None
comment: str | None = None
@phis.validator
def _check_phis(self, _attribute: object, phis: list[Phi]) -> None:
for phi in phis:
attrs.validate(phi)
@ops.validator
def _check_ops(self, _attribute: object, ops: list[Op]) -> None:
for op in ops:
attrs.validate(op)
@terminator.validator
def _check_terminator(self, _attribute: object, terminator: ControlOp | None) -> None:
if terminator is not None:
attrs.validate(terminator)
@property
def terminated(self) -> bool:
return self.terminator is not None
@property
def successors(self) -> "Sequence[BasicBlock]":
if self.terminator is None:
return ()
return self.terminator.unique_targets
@property
def is_empty(self) -> bool:
return not (self.phis or self.ops or self.terminator)
@property
def all_ops(self) -> Iterator[Phi | Op | ControlOp]:
yield from self.phis.copy()
yield from self.ops.copy() # copy in case ops is modified
if self.terminator is not None:
yield self.terminator
def get_assigned_registers(self) -> Iterator[Register]:
for phi in self.phis:
yield phi.register
for op in self.ops:
if isinstance(op, Assignment):
yield from op.targets
def __str__(self) -> str:
return f"block@{self.id}"
@attrs.define(eq=False, kw_only=True)
class ConditionalBranch(ControlOp):
"""Branch based on zero-ness
opcode: bz+b or bnz+b
"""
condition: Value = attrs.field()
non_zero: BasicBlock
zero: BasicBlock
def _frozen_data(self) -> object:
return self.condition.freeze(), self.non_zero.id, self.zero.id
@condition.validator
def check(self, _attribute: object, result: Value) -> None:
if result.atype != AVMType.uint64:
raise CodeError(
"Branch condition can only be uint64 backed value", self.source_location
)
def targets(self) -> Sequence[BasicBlock]:
return self.zero, self.non_zero
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_conditional_branch(self)
@attrs.define(eq=False)
class Goto(ControlOp):
"""Unconditional jump
opcode: b
"""
target: BasicBlock
def _frozen_data(self) -> object:
return self.target.id
def targets(self) -> Sequence[BasicBlock]:
return (self.target,)
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_goto(self)
@attrs.define(eq=False)
class GotoNth(ControlOp):
"""Jump to the nth block in a list where n is defined by a UInt16 Value.
Jumps to the default if n is larger than the number of blocks
opcode: switch+b"""
value: Value
blocks: list[BasicBlock] = attrs.field()
default: BasicBlock
def _frozen_data(self) -> object:
return self.value, tuple(b.id for b in self.blocks), self.default.id
def targets(self) -> Sequence[BasicBlock]:
return *self.blocks, self.default
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_goto_nth(self)
@attrs.define(eq=False)
class Switch(ControlOp):
"""Jump based on comparison between a value and a fixed list of other values.
If no match is found, jumps to "default". This default option doesn't exist in
the underlying op code, but we need it to make this truly terminate the basic block
it's in, otherwise it'd violate certain CFG constraints.
opcode: match+b
"""
value: Value
cases: dict[Value, BasicBlock] = attrs.field()
default: BasicBlock
@cases.validator
def _check_cases(self, _attribute: object, cases: dict[Value, BasicBlock]) -> None:
if any(case.atype != self.value.atype for case in cases):
raise CodeError(
"Switch cases types mismatch with value to match", self.source_location
)
def _frozen_data(self) -> object:
return (
self.value,
tuple((v, b.id) for v, b in self.cases.items()),
self.default.id,
)
def targets(self) -> Sequence[BasicBlock]:
return *self.cases.values(), self.default
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_switch(self)
@attrs.define(eq=False)
class SubroutineReturn(ControlOp):
"""Return from within a subroutine
opcode: retsub
"""
result: list[Value]
def _frozen_data(self) -> object:
return tuple(self.result)
def targets(self) -> Sequence[BasicBlock]:
return ()
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_subroutine_return(self)
@attrs.define(eq=False)
class ProgramExit(ControlOp):
"""Return from and exit the program immediately
opcode: return
"""
result: Value = attrs.field()
@result.validator
def check(self, _attribute: object, result: Value) -> None:
if result.atype != AVMType.uint64:
raise CodeError("Can only exit with uint64 backed value", self.source_location)
def _frozen_data(self) -> object:
return self.result
def targets(self) -> Sequence[BasicBlock]:
return ()
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_program_exit(self)
@attrs.define(eq=False)
class Fail(ControlOp):
"""Exits immediately with a failure condition.
assert statements with a compile time constant that is false
should be translated to this node type in order to become ControlOp
opcode: err
"""
error_message: str | None
def targets(self) -> Sequence[BasicBlock]:
return ()
def accept(self, visitor: IRVisitor[T]) -> T:
return visitor.visit_fail(self)
def _frozen_data(self) -> object:
return self.error_message
@attrs.frozen
class Parameter(Register):
implicit_return: bool
@attrs.define(eq=False, kw_only=True)
class Subroutine(Context):
id: str
short_name: str
# source_location might be None if it was synthesized e.g. ARC4 approval method
source_location: SourceLocation | None
parameters: Sequence[Parameter]
_returns: Sequence[IRType]
body: list[BasicBlock] = attrs.field()
inline: bool | None
@property
def returns(self) -> list[IRType]:
return [*self._returns, *(p.ir_type for p in self.parameters if p.implicit_return)]
@body.validator
def _check_blocks(self, _attribute: object, body: list[BasicBlock]) -> None:
blocks = frozenset(body)
for block in body:
attrs.validate(block)
if block.terminator is None:
raise InternalError(
f"Unterminated block {block} assigned to subroutine {self.id}",
block.source_location,
)
for successor in block.successors:
if block not in successor.predecessors:
# Note: this check is here rather than on BasicBlock only because of
# circular validation issues where you're trying to update the CFG by
# replacing a terminator
raise InternalError(
f"{block} does not appear in all {block.terminator}"
f" target's predecessor lists - missing from {successor.id} at least",
block.terminator.source_location
or block.source_location
or self.source_location,
)
if not blocks.issuperset(block.predecessors):
raise InternalError(
f"{block} of subroutine {self.id} has predecessor block(s) outside of list",
block.source_location,
)
if not blocks.issuperset(block.successors):
raise InternalError(
f"{block} of subroutine {self.id} has predecessor block(s) outside of list",
block.source_location,
)
block_predecessors = dict.fromkeys(block.predecessors)
for phi in block.phis:
phi_blocks = dict.fromkeys(a.through for a in phi.args)
if block_predecessors.keys() != phi_blocks.keys():
phi_block_labels = list(map(str, phi_blocks.keys()))
pred_block_labels = list(map(str, block_predecessors.keys()))
raise InternalError(
f"{self.id}: mismatch between phi predecessors ({phi_block_labels})"
f" and {block} predecessors ({pred_block_labels})"
f" for phi node {phi}",
self.source_location,
)
used_registers = _get_used_registers(body)
defined_registers = frozenset(self.parameters) | frozenset(_get_assigned_registers(body))
bad_reads = used_registers - defined_registers
if bad_reads:
raise InternalError(
f"The following variables are used but never defined:"
f" {', '.join(map(str, bad_reads))}",
self.source_location,
)
@property
def entry(self) -> BasicBlock:
return self.body[0]
def get_assigned_registers(self) -> Iterator[Register]:
yield from self.parameters
yield from _get_assigned_registers(self.body)
def get_used_registers(self) -> Iterator[Register]:
yield from _get_used_registers(self.body)
def validate_with_ssa(self) -> None:
all_assigned = set[Register]()
for block in self.body:
for register in block.get_assigned_registers():
if register in all_assigned:
raise InternalError(
f"SSA constraint violated:"
f" {register.local_id} is assigned multiple times"
)
all_assigned.add(register)
attrs.validate(self)
def _get_assigned_registers(blocks: Sequence[BasicBlock]) -> Iterator[Register]:
# TODO: replace with visitor
for block in blocks:
for phi in block.phis:
yield phi.register
for op in block.ops:
if isinstance(op, Assignment):
yield from op.targets
def _get_used_registers(blocks: Sequence[BasicBlock]) -> Set[Register]:
from puya.ir.register_read_collector import RegisterReadCollector
collector = RegisterReadCollector()
for block in blocks:
for op in block.all_ops:
op.accept(collector)
return collector.used_registers
@attrs.define(kw_only=True, eq=False)
class Program(Context):
"""An individual TEAL output unit - e.g. an approval program, clear program, lsig"""
# note: main is represented as a subroutine for simplified handling,
# but should be "inlined" as main contract body during codegen.
# ie, it could be generated as subroutine "main" with proto 0 1,
# and then start of contract becomes:
#
# callsub main
# return
# main:
# proto 0 1
# ...
# retsub
#
# but, to save program size + op codes, this should be simplified to:
# ...
# return
#
# ie, just omit the subroutine header, and replace any&all retsub ops with a return instead
kind: ProgramKind
main: Subroutine
subroutines: Sequence[Subroutine]
avm_version: int
source_location: SourceLocation | None = None
def __attrs_post_init__(self) -> None:
if self.source_location is None:
self.source_location = self.main.source_location
@property
def all_subroutines(self) -> Iterable[Subroutine]:
yield self.main
yield from self.subroutines
@attrs.define(eq=False)
class Contract(Context):
source_location: SourceLocation
approval_program: Program
clear_program: Program
metadata: ContractMetaData
def all_subroutines(self) -> Iterable[Subroutine]:
from itertools import chain
yield from unique(
chain(
self.approval_program.all_subroutines,
self.clear_program.all_subroutines,
)
)
def all_programs(self) -> Iterable[Program]:
return [self.approval_program, self.clear_program]
@attrs.define(eq=False)
class LogicSignature(Context):
source_location: SourceLocation
program: Program
metadata: LogicSignatureMetaData
def all_subroutines(self) -> Iterable[Subroutine]:
return self.program.all_subroutines
def all_programs(self) -> Iterable[Program]:
return [self.program]
ModuleArtifact: t.TypeAlias = Contract | LogicSignature
| algorandfoundation/puya | src/puya/ir/models.py | Python | NOASSERTION | 30,346 |
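A small construction sketch for models.py (assumed usage, not taken from the package): value nodes are frozen attrs classes that compare structurally, while ops are declared with eq=False and compare by identity, which is what the NOTE above the ValueProvider definition is about.

from puya.avm import AVMType
from puya.ir.models import Assignment, Register, UInt64Constant
from puya.ir.types_ import IRType

x0 = Register(source_location=None, ir_type=IRType.uint64, name="x", version=0)
one = UInt64Constant(value=1, source_location=None)
assign = Assignment(targets=[x0], source=one, source_location=None)

assert x0.local_id == "x#0"            # register name plus SSA version
assert one.atype is AVMType.uint64     # every Value maps onto a single AVM type
assert assign.targets == (x0,)         # targets are converted to a tuple
# ops use identity equality, so a structural twin is a distinct IR node:
assert assign != Assignment(targets=[x0], source=one, source_location=None)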
| algorandfoundation/puya | src/puya/ir/optimize/__init__.py | Python | NOASSERTION | 0 |
from collections import Counter
from functools import cached_property
import networkx as nx # type: ignore[import-untyped]
from puya.ir import models
class CallGraph:
def __init__(self, program: models.Program) -> None:
self._program = program
@cached_property
def _graph(self) -> nx.MultiDiGraph:
graph = nx.MultiDiGraph()
for sub in self._program.all_subroutines:
graph.add_node(sub.id, ref=sub)
for sub in self._program.all_subroutines:
for block in sub.body:
for op in block.ops:
match op:
case (
models.InvokeSubroutine(target=target)
| models.Assignment(source=models.InvokeSubroutine(target=target))
):
graph.add_edge(sub.id, target.id)
return graph
@cached_property
def _paths(self) -> dict[str, dict[str, object]]:
return dict(nx.all_pairs_shortest_path(self._graph))
def callees(self, sub: models.Subroutine) -> list[tuple[str, int]]:
return list(Counter(callee_id for callee_id, _ in self._graph.in_edges(sub.id)).items())
def has_path(self, from_: str, to: str) -> bool:
try:
self._paths[from_][to]
except KeyError:
return False
else:
return True
def is_auto_recursive(self, sub: models.Subroutine) -> bool:
return bool(self._graph.has_predecessor(sub.id, sub.id))
| algorandfoundation/puya | src/puya/ir/optimize/_call_graph.py | Python | NOASSERTION | 1,535 |
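A usage sketch for CallGraph (the program value and the queried subroutine id are assumptions): reachability and direct-recursion queries over a built Program.

call_graph = CallGraph(program)
if call_graph.has_path(program.main.id, "examples.helpers.clamp"):
    ...  # the subroutine with that id is (transitively) reachable from main
for sub in program.all_subroutines:
    if call_graph.is_auto_recursive(sub):
        ...  # sub contains a direct call to itself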
from puya.errors import InternalError
from puya.ir import models
def get_definition(
subroutine: models.Subroutine, register: models.Register, *, should_exist: bool = True
) -> models.Assignment | models.Phi | None:
if register in subroutine.parameters:
return None
for block in subroutine.body:
for phi in block.phis:
if phi.register == register:
return phi
for op in block.ops:
if isinstance(op, models.Assignment) and register in op.targets:
return op
if should_exist:
raise InternalError(f"Register is not defined: {register}", subroutine.source_location)
return None
| algorandfoundation/puya | src/puya/ir/optimize/_utils.py | Python | NOASSERTION | 682 |
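An assumed usage sketch for get_definition: in SSA there is at most one defining node per register, and parameters deliberately return None.

defn = get_definition(subroutine, register)  # subroutine and register assumed to exist
match defn:
    case models.Phi():
        ...  # defined by a phi node at the top of a block
    case models.Assignment():
        ...  # defined by an ordinary assignment op
    case None:
        ...  # the register is a subroutine parameter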
from puya import log
from puya.context import CompileContext
from puya.ir import models
from puya.ir.context import TMP_VAR_INDICATOR
from puya.ir.visitor_mem_replacer import MemoryReplacer
logger = log.get_logger(__name__)
def copy_propagation(_context: CompileContext, subroutine: models.Subroutine) -> bool:
set_lookup = dict[models.Register, list[models.Register]]()
all_equivalence_sets = list[list[models.Register]]()
modified = False
for block in subroutine.body:
for op in block.ops.copy():
match op:
case models.Assignment(targets=[target], source=models.Register() as source):
try:
equiv_set = set_lookup[source]
assert source in equiv_set
except KeyError:
set_lookup[source] = equiv_set = [source]
all_equivalence_sets.append(equiv_set)
equiv_set.append(target)
set_lookup[target] = equiv_set
block.ops.remove(op)
modified = True
replacements = dict[models.Register, models.Register]()
for equivalence_set in all_equivalence_sets:
assert len(equivalence_set) >= 2
equiv_set_ids = ", ".join(x.local_id for x in equivalence_set)
logger.debug(f"Found equivalence set: {equiv_set_ids}")
for reg in equivalence_set:
if TMP_VAR_INDICATOR not in reg.name:
replacement = reg
break
else:
replacement = equivalence_set[0]
for r in equivalence_set:
if r is not replacement:
replacements[r] = replacement
for block in subroutine.body:
for phi in block.phis.copy():
try:
(single_register,) = {replacements.get(arg.value, arg.value) for arg in phi.args}
except ValueError:
continue
else:
replacements[phi.register] = single_register
block.phis.remove(phi)
modified = True
replaced = MemoryReplacer.apply(subroutine.body, replacements=replacements)
if replaced:
logger.debug(f"Copy propagation made {replaced} modifications")
modified = True
return modified
| algorandfoundation/puya | src/puya/ir/optimize/assignments.py | Python | NOASSERTION | 2,325 |
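A worked sketch of what copy_propagation does (the register names and the driving context are assumptions):

# given SSA of the form:
#     b#0 = ...
#     a#1 = b#0
#     c#2 = a#1
# the pass groups {b#0, a#1, c#2} into one equivalence set, picks a representative
# that is not a compiler temporary (here b#0), deletes both copy assignments, and
# rewrites every later read of a#1 / c#2 to read b#0 instead.
changed = copy_propagation(compile_context, subroutine)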
import contextlib
import itertools
from collections.abc import Iterable
import attrs
from puya import log
from puya.context import CompileContext
from puya.ir import models
from puya.ir.visitor_mutator import IRMutator
from puya.utils import unique
logger = log.get_logger(__name__)
@attrs.define
class BlockReferenceReplacer(IRMutator):
find: models.BasicBlock
replacement: models.BasicBlock
@classmethod
def apply(
cls,
find: models.BasicBlock,
replacement: models.BasicBlock,
blocks: Iterable[models.BasicBlock],
) -> None:
replacer = cls(find=find, replacement=replacement)
for block in blocks:
replacer.visit_block(block)
def visit_block(self, block: models.BasicBlock) -> None:
super().visit_block(block)
if self.find in block.predecessors:
block.predecessors = [
self.replacement if b is self.find else b for b in block.predecessors
]
logger.debug(f"Replaced predecessor {self.find} with {self.replacement} in {block}")
def visit_phi_argument(self, arg: models.PhiArgument) -> models.PhiArgument:
if arg.through == self.find:
arg.through = self.replacement
return arg
def visit_conditional_branch(self, branch: models.ConditionalBranch) -> models.ControlOp:
if branch.zero == self.find:
branch.zero = self.replacement
if branch.non_zero == self.find:
branch.non_zero = self.replacement
return _replace_single_target_with_goto(branch)
def visit_goto(self, goto: models.Goto) -> models.Goto:
if goto.target == self.find:
goto.target = self.replacement
return goto
def visit_goto_nth(self, goto_nth: models.GotoNth) -> models.ControlOp:
for index, block in enumerate(goto_nth.blocks):
if block == self.find:
goto_nth.blocks[index] = self.replacement
if goto_nth.default == self.find:
goto_nth.default = self.replacement
return _replace_single_target_with_goto(goto_nth)
def visit_switch(self, switch: models.Switch) -> models.ControlOp:
for case, target in switch.cases.items():
if target == self.find:
switch.cases[case] = self.replacement
if switch.default == self.find:
switch.default = self.replacement
return _replace_single_target_with_goto(switch)
def _replace_single_target_with_goto(terminator: models.ControlOp) -> models.ControlOp:
"""
If a ControlOp has a single target, replace it with a Goto, otherwise return the original op.
"""
match terminator:
case models.ControlOp(unique_targets=[single_target]):
replacement = models.Goto(
source_location=terminator.source_location,
target=single_target,
)
logger.debug(f"replaced {terminator} with {replacement}")
return replacement
case _:
return terminator
def remove_linear_jump(_context: CompileContext, subroutine: models.Subroutine) -> bool:
changes = False
for block in subroutine.body[1:]:
match block.predecessors:
case [models.BasicBlock(terminator=models.Goto(target=successor)) as predecessor]:
assert successor is block
# can merge blocks when there is an unconditional jump between them
predecessor.phis.extend(block.phis)
predecessor.ops.extend(block.ops)
# this will update the predecessors of all block.successors to
# now point back to predecessor e.g.
# predecessor <-> block <-> [ss1, ...]
# predecessor <-> [ss1, ...]
BlockReferenceReplacer.apply(
find=block, replacement=predecessor, blocks=block.successors
)
predecessor.terminator = block.terminator
# update block to reflect modifications
subroutine.body.remove(block)
changes = True
logger.debug(f"Merged linear {block} into {predecessor}")
return changes
def remove_empty_blocks(_context: CompileContext, subroutine: models.Subroutine) -> bool:
changes = False
for block in subroutine.body.copy():
if not block.phis and not block.ops and isinstance(block.terminator, models.Goto):
empty_block = block
target = block.terminator.target
if target.phis:
logger.debug(
f"Not removing empty block {empty_block} because it's used by phi nodes"
)
continue
# this will replace any ops that pointed to block
BlockReferenceReplacer.apply(
find=empty_block,
replacement=target,
blocks=empty_block.predecessors,
)
# remove the empty block from the target's predecessors, and add any of the empty
# block's predecessors that aren't already present
target.predecessors = unique(
itertools.chain(empty_block.predecessors, target.predecessors)
)
# might have already been replaced by BlockReferenceReplacer
with contextlib.suppress(ValueError):
target.predecessors.remove(empty_block)
if empty_block is subroutine.body[0]:
# place target at start of body so it's now the new entry block
subroutine.body.remove(target)
subroutine.body.insert(0, target)
subroutine.body.remove(empty_block)
changes = True
logger.debug(f"Removed empty block: {empty_block}")
return changes
| algorandfoundation/puya | src/puya/ir/optimize/collapse_blocks.py | Python | NOASSERTION | 5,841 |
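A minimal driving sketch for the two block-collapsing passes (the loop is an assumption, not the compiler's actual pass manager): each pass can expose more work for the other, so they are iterated until neither reports a change.

changed = True
while changed:
    changed = remove_linear_jump(compile_context, subroutine)
    changed = remove_empty_blocks(compile_context, subroutine) or changed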
"""
The compiled reference replacement is part of the optimizer pipeline for two reasons:
1.) It relies on any template variables provided being optimized into constant values
2.) Once compiled references are replaced there are additional optimizations that can occur
"""
import typing
from collections.abc import Mapping
import attrs
from immutabledict import immutabledict
from puya import log
from puya.algo_constants import HASH_PREFIX_PROGRAM, MAX_BYTES_LENGTH
from puya.awst.txn_fields import TxnField
from puya.compilation_artifacts import TemplateValue
from puya.context import ArtifactCompileContext
from puya.errors import CodeError, InternalError
from puya.ir import models as ir
from puya.ir.types_ import AVMBytesEncoding
from puya.ir.visitor_mutator import IRMutator
from puya.program_refs import ProgramKind
from puya.utils import (
Address,
biguint_bytes_eval,
calculate_extra_program_pages,
method_selector_hash,
sha512_256_hash,
)
logger = log.get_logger(__name__)
def replace_compiled_references(
context: ArtifactCompileContext, subroutine: ir.Subroutine
) -> bool:
replacer = CompiledReferenceReplacer(context)
for block in subroutine.body:
replacer.visit_block(block)
return replacer.modified
@attrs.define
class CompiledReferenceReplacer(IRMutator):
context: ArtifactCompileContext
modified: bool = False
def visit_compiled_logicsig_reference( # type: ignore[override]
self,
const: ir.CompiledLogicSigReference,
) -> ir.CompiledLogicSigReference | ir.Constant:
if not _is_constant(const.template_variables):
return const
template_constants = _get_template_constants(const.template_variables)
program_bytecode = self.context.build_program_bytecode(
const.artifact, ProgramKind.logic_signature, template_constants=template_constants
)
address_public_key = sha512_256_hash(HASH_PREFIX_PROGRAM + program_bytecode)
return ir.AddressConstant(
value=Address.from_public_key(address_public_key).address,
source_location=const.source_location,
)
def visit_compiled_contract_reference( # type: ignore[override]
self,
const: ir.CompiledContractReference,
) -> ir.CompiledContractReference | ir.Constant:
field = const.field
if field in (
TxnField.GlobalNumUint,
TxnField.GlobalNumByteSlice,
TxnField.LocalNumUint,
TxnField.LocalNumByteSlice,
):
state_total = self.context.get_state_totals(const.artifact)
match field:
case TxnField.GlobalNumUint:
total = state_total.global_uints
case TxnField.GlobalNumByteSlice:
total = state_total.global_bytes
case TxnField.LocalNumUint:
total = state_total.local_uints
case TxnField.LocalNumByteSlice:
total = state_total.local_bytes
case _:
raise InternalError(
f"Invalid state total field: {field.name}", const.source_location
)
return ir.UInt64Constant(
value=total,
source_location=const.source_location,
)
if not _is_constant(const.template_variables):
return const
template_constants = _get_template_constants(const.template_variables)
match field:
case TxnField.ApprovalProgramPages | TxnField.ClearStateProgramPages:
page = const.program_page
if page is None:
raise InternalError("expected non-none value for page", const.source_location)
program_bytecode = self.context.build_program_bytecode(
const.artifact,
(
ProgramKind.approval
if field == TxnField.ApprovalProgramPages
else ProgramKind.clear_state
),
template_constants=template_constants,
)
program_page = program_bytecode[
page * MAX_BYTES_LENGTH : (page + 1) * MAX_BYTES_LENGTH
]
return ir.BytesConstant(
value=program_page,
encoding=AVMBytesEncoding.base64,
source_location=const.source_location,
)
case TxnField.ExtraProgramPages:
approval_bytecode = self.context.build_program_bytecode(
const.artifact, ProgramKind.approval, template_constants=template_constants
)
clear_bytecode = self.context.build_program_bytecode(
const.artifact, ProgramKind.clear_state, template_constants=template_constants
)
return ir.UInt64Constant(
value=calculate_extra_program_pages(
len(approval_bytecode), len(clear_bytecode)
),
source_location=const.source_location,
)
raise InternalError(
f"Unhandled compiled reference field: {field.name}", const.source_location
)
def _is_constant(
template_variables: Mapping[str, ir.Value],
) -> typing.TypeGuard[Mapping[str, ir.Constant]]:
return all(isinstance(var, ir.Constant) for var in template_variables.values())
def _get_template_constants(
template_variables: Mapping[str, ir.Constant],
) -> immutabledict[str, TemplateValue]:
result = {
var: (_extract_constant_value(value), value.source_location)
for var, value in template_variables.items()
}
return immutabledict(result)
def _extract_constant_value(value: ir.Constant) -> int | bytes:
match value:
case ir.UInt64Constant(value=int_value):
return int_value
case ir.BytesConstant(value=bytes_value):
return bytes_value
case ir.BigUIntConstant(value=biguint):
return biguint_bytes_eval(biguint)
case ir.AddressConstant(value=addr):
address = Address.parse(addr)
return address.public_key
case ir.MethodConstant(value=method):
return method_selector_hash(method)
case ir.ITxnConstant():
raise CodeError(
"inner transactions cannot be used as a template variable",
value.source_location,
)
case _:
raise InternalError(
f"unhandled constant type: {type(value).__name__}",
location=value.source_location,
)
| algorandfoundation/puya | src/puya/ir/optimize/compiled_reference.py | Python | NOASSERTION | 6,777 |
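A small sketch of the template-variable helpers used above (the key and value are assumptions; keys are expected to carry their template prefix):

tmpl = {"TMPL_FEE": ir.UInt64Constant(value=1_000, source_location=None)}
if _is_constant(tmpl):  # TypeGuard: every value is an ir.Constant
    template_constants = _get_template_constants(tmpl)
    assert template_constants["TMPL_FEE"] == (1_000, None)  # (value, source location)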
from collections import defaultdict
import attrs
from puya import log
from puya.context import CompileContext
from puya.ir import models
from puya.ir.register_read_collector import RegisterReadCollector
from puya.ir.visitor_mutator import IRMutator
logger = log.get_logger(__name__)
_AnyOp = models.Op | models.ControlOp
def constant_replacer(_context: CompileContext, subroutine: models.Subroutine) -> bool:
constants = dict[models.Register, models.Constant]()
ssa_reads = defaultdict[models.Register, list[_AnyOp]](list)
for block in subroutine.body:
assert block.terminator is not None
ops: tuple[_AnyOp, ...] = (*block.ops, block.terminator)
for op in ops:
match op:
case models.Assignment(targets=[register], source=models.Constant() as constant):
constants[register] = constant
case _:
collector = RegisterReadCollector()
op.accept(collector)
for read_reg in collector.used_registers:
ssa_reads[read_reg].append(op)
modified = 0
work_list = constants.copy()
while work_list:
const_reg, const_val = work_list.popitem()
replacer = ConstantRegisterReplacer({const_reg: const_val})
for const_read in ssa_reads[const_reg]:
if isinstance(const_read, models.Assignment) and const_read.source == const_reg:
(register,) = const_read.targets
constants[register] = work_list[register] = const_val
const_read.accept(replacer)
modified += replacer.modified
phi_replace = {
phi.register: phi_constant
for block in subroutine.body
for phi in block.phis
if (phi_constant := _get_singular_phi_constant(phi, constants)) is not None
}
if phi_replace:
modified += ConstantRegisterReplacer.apply(phi_replace, to=subroutine)
return modified > 0
def _get_singular_phi_constant(
phi: models.Phi, constants: dict[models.Register, models.Constant]
) -> models.Constant | None:
try:
(constant,) = {constants[phi_arg.value] for phi_arg in phi.args}
except (KeyError, ValueError):
return None
else:
return constant
@attrs.define
class ConstantRegisterReplacer(IRMutator):
constants: dict[models.Register, models.Constant]
modified: int = 0
@classmethod
def apply(
cls, constants: dict[models.Register, models.Constant], to: models.Subroutine
) -> int:
replacer = cls(constants)
for block in to.body:
replacer.visit_block(block)
return replacer.modified
def visit_assignment(self, ass: models.Assignment) -> models.Assignment:
# don't visit target(s), needs to stay as Register
ass.source = ass.source.accept(self)
return ass
def visit_phi(self, phi: models.Phi) -> models.Phi:
# don't visit phi nodes, needs to stay as Register
return phi
def visit_register(self, reg: models.Register) -> models.Register:
try:
const = self.constants[reg]
except KeyError:
return reg
self.modified += 1
return const # type: ignore[return-value]
| algorandfoundation/puya | src/puya/ir/optimize/constant_propagation.py | Python | NOASSERTION | 3,275 |
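A worked sketch of constant_replacer (the register and op names are assumptions):

# given SSA such as:
#     x#0 = 1
#     y#0 = x#0
#     z#0 = y#0 + 2      (an Intrinsic whose args include y#0)
# x#0 is recorded as a constant; the work list then discovers y#0 via the copy,
# and both the copy and the intrinsic have their reads rewritten to the literal 1.
# The now-dead assignments are left for later passes to remove.
changed = constant_replacer(compile_context, subroutine)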
import attrs
from puya.context import ArtifactCompileContext
@attrs.define(kw_only=True)
class IROptimizationContext(ArtifactCompileContext):
expand_all_bytes: bool
inlineable_calls: set[tuple[str, str]] = attrs.field(factory=set)
"""src -> dst pairs that can/should be inlined"""
constant_with_constant_args: dict[str, bool] = attrs.field(factory=dict)
| algorandfoundation/puya | src/puya/ir/optimize/context.py | Python | NOASSERTION | 374 |
import attrs
from puya import log
from puya.avm import AVMType
from puya.context import CompileContext
from puya.ir import models
from puya.ir.avm_ops import AVMOp
from puya.ir.optimize._utils import get_definition
from puya.ir.ssa import TrivialPhiRemover
from puya.ir.types_ import IRType
from puya.utils import unique
logger = log.get_logger(__name__)
# the ratio of default cases to all cases used to decide when a Switch over
# constant values can be simplified to a goto-nth
_SWITCH_SPARSENESS_SIMPLIFICATION_RATIO = 0.5
def simplify_control_ops(_context: CompileContext, subroutine: models.Subroutine) -> bool:
changes = False
modified_phis = []
def remove_target(parent: models.BasicBlock, to_remove: models.BasicBlock) -> None:
to_remove.predecessors.remove(parent)
for other_phi in to_remove.phis:
other_phi.args = [arg for arg in other_phi.args if arg.through is not parent]
modified_phis.append(other_phi)
for block in subroutine.body:
terminator = block.terminator
match terminator:
case models.ProgramExit(result=models.UInt64Constant(value=0)):
logger.debug("simplifying exit 0 to err")
block.terminator = models.Fail(
source_location=terminator.source_location, error_message=None
)
case models.ConditionalBranch(
condition=models.UInt64Constant(value=value), zero=zero, non_zero=non_zero
):
logger.debug("simplifying conditional branch with a constant into a goto")
if value == 0:
goto, other = zero, non_zero
else:
goto, other = non_zero, zero
block.terminator = models.Goto(
source_location=terminator.source_location, target=goto
)
if other is not goto:
remove_target(block, other)
case models.ConditionalBranch(
condition=condition,
zero=models.BasicBlock(
phis=[], ops=[], terminator=models.Fail(error_message=fail_comment)
) as err_block,
non_zero=non_zero,
source_location=source_location,
):
logger.debug("inlining condition branch to err block into an assert true")
block.ops.append(
models.Intrinsic(
op=AVMOp.assert_,
args=[condition],
error_message=fail_comment,
source_location=source_location,
)
)
block.terminator = models.Goto(target=non_zero, source_location=source_location)
if err_block not in block.successors:
remove_target(block, err_block)
case models.ConditionalBranch(
condition=models.Register() as condition,
non_zero=models.BasicBlock(
phis=[], ops=[], terminator=models.Fail(error_message=fail_comment)
) as err_block,
zero=zero,
source_location=source_location,
):
logger.debug("inlining condition branch to err block into an assert false")
not_condition = models.Register(
name=f"not%{condition.name}",
ir_type=IRType.bool,
version=condition.version,
source_location=source_location,
)
if get_definition(subroutine, not_condition, should_exist=False) is None:
block.ops.append(
models.Assignment(
targets=[not_condition],
source=models.Intrinsic(
op=AVMOp.not_,
args=[condition],
source_location=source_location,
),
source_location=source_location,
)
)
block.ops.append(
models.Intrinsic(
op=AVMOp.assert_,
args=[not_condition],
error_message=fail_comment,
source_location=source_location,
)
)
block.terminator = models.Goto(target=zero, source_location=source_location)
if err_block not in block.successors:
remove_target(block, err_block)
case (
models.ConditionalBranch(
condition=models.Register() as condition,
) as branch
) if (
isinstance(defn := get_definition(subroutine, condition), models.Assignment)
and isinstance(defn.source, models.Intrinsic)
and defn.source.op is AVMOp.not_
):
logger.debug(
f"simplified branch on !{condition} by swapping zero and non-zero targets"
)
block.terminator = attrs.evolve(
branch,
zero=branch.non_zero,
non_zero=branch.zero,
condition=defn.source.args[0],
)
case (
models.Switch(
value=models.Value(atype=AVMType.uint64) as value,
cases=cases,
default=default_block,
source_location=source_location,
) as switch
) if _can_simplify_switch(switch):
logger.debug("simplifying a switch with constants into goto nth")
# reduce to GotoNth
block_map = dict[int, models.BasicBlock]()
for case, case_block in cases.items():
assert isinstance(case, models.UInt64Constant)
block_map[case.value] = case_block
max_value = max(block_map)
block.terminator = models.GotoNth(
value=value,
blocks=[block_map.get(i, default_block) for i in range(max_value + 1)],
source_location=source_location,
default=default_block,
)
case models.GotoNth(
value=models.UInt64Constant(value=value),
blocks=blocks,
default=default_block,
):
logger.debug("simplifying a goto nth with a constant into a goto")
goto = blocks[value] if value < len(blocks) else default_block
block.terminator = models.Goto(
source_location=terminator.source_location, target=goto
)
for target in unique(terminator.targets()):
if target is not goto:
remove_target(block, target)
case models.GotoNth(
value=value,
                blocks=[zero],  # pattern matches only a single-element block list
default=non_zero,
): # reduces to ConditionalBranch
logger.debug("simplifying a goto nth with two targets into a conditional branch")
block.terminator = models.ConditionalBranch(
condition=value,
zero=zero,
non_zero=non_zero,
source_location=terminator.source_location,
)
case _:
continue
changes = True
logger.debug(f"simplified terminator of {block} from {terminator} to {block.terminator}")
for phi in modified_phis:
TrivialPhiRemover.try_remove(phi, subroutine.body)
return changes
def _can_simplify_switch(switch: models.Switch) -> bool:
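    # a switch over constant values can become a dense goto-nth table indexed up to the
    # largest case value; only convert if the table would not be dominated by default
    # (unmatched) entries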
total_targets = 0
for case in switch.cases:
if not isinstance(case, models.UInt64Constant):
return False
total_targets = max(total_targets, case.value)
default_targets = total_targets - len(switch.cases)
return default_targets < (total_targets * _SWITCH_SPARSENESS_SIMPLIFICATION_RATIO)
|
algorandfoundation/puya
|
src/puya/ir/optimize/control_op_simplification.py
|
Python
|
NOASSERTION
| 8,330 |
import typing
from collections.abc import Iterable, Sequence, Set
import attrs
from puya import log
from puya.context import CompileContext
from puya.errors import InternalError
from puya.ir import models, visitor
from puya.ir._utils import bfs_block_order
from puya.ir.ssa import TrivialPhiRemover
from puya.utils import StableSet
logger = log.get_logger(__name__)
PURE_AVM_OPS = frozenset(
[
# group: ops that can't fail at runtime
# `txn FirstValidTime` technically could fail, but shouldn't happen on mainnet?
"txn",
"sha256",
"keccak256",
"sha3_256",
"sha512_256",
"bitlen",
# group: could only fail on a type error
"!",
"!=",
"&",
"&&",
"<",
"<=",
"==",
">",
">=",
"|",
"||",
"~",
"addw",
"mulw",
"itob",
"len",
"select",
"sqrt",
"shl",
"shr",
# group: fail if an input is zero
"%",
"/",
"expw",
"divmodw",
"divw",
# group: fail on over/underflow
"*",
"+",
"-",
"^",
"exp",
# group: fail on index out of bounds
"arg",
"arg_0",
"arg_1",
"arg_2",
"arg_3",
"args",
"extract",
"extract3",
"extract_uint16",
"extract_uint32",
"extract_uint64",
"replace2",
"replace3",
"setbit",
"setbyte",
"getbit",
"getbyte",
"gaid",
"gaids",
"gload",
"gloads",
"gloadss",
"substring",
"substring3",
"txna",
"txnas",
"gtxn",
"gtxna",
"gtxnas",
"gtxns",
"gtxnsa",
"gtxnsas",
"block",
# group: fail on input too large
"b%",
"b*",
"b+",
"b-",
"b/",
"b^",
"btoi",
# group: might fail on input too large? TODO: verify these
"b!=",
"b<",
"b<=",
"b==",
"b>",
"b>=",
"b&",
"b|",
"b~",
"bsqrt",
# group: fail on output too large
"concat",
"bzero",
# group: fail on input format / byte lengths
"base64_decode",
"json_ref",
"ecdsa_pk_decompress",
"ecdsa_pk_recover",
"ec_add",
"ec_pairing_check",
"ec_scalar_mul",
"ec_subgroup_check",
"ec_multi_scalar_mul",
"ec_map_to",
"ecdsa_verify",
"ed25519verify",
"ed25519verify_bare",
"vrf_verify",
# group: v11
"falcon_verify",
"sumhash512",
]
)
# ops that have no observable side effects outside the function
# note: originally generated based on all ops that:
# - return a stack value (this, as of v10, yields no false negatives)
# - AND aren't in the generate_avm_ops.py list of exclusions (which are all control flow
#   or pure stack manipulations)
# - AND aren't box_create or box_del, which were the only remaining false positives
IMPURE_SIDE_EFFECT_FREE_AVM_OPS = frozenset(
[
# group: ops that can't fail at runtime
"global", # OpcodeBudget is non-const, otherwise this could be pure
# group: could only fail on a type error
"app_global_get",
"app_global_get_ex",
"load",
# group: fail on resource not "available"
# TODO: determine if any of this group is pure
"acct_params_get",
"app_opted_in",
"app_params_get",
"asset_holding_get",
"asset_params_get",
"app_local_get",
"app_local_get_ex",
"balance",
"min_balance",
"box_extract",
"box_get",
"box_len",
# group: fail on index out of bounds
"loads",
# group: might fail depending on state
"itxn",
"itxna",
"itxnas",
"gitxn",
"gitxna",
"gitxnas",
]
)
_should_be_empty = PURE_AVM_OPS & IMPURE_SIDE_EFFECT_FREE_AVM_OPS
assert not _should_be_empty, _should_be_empty
SIDE_EFFECT_FREE_AVM_OPS = frozenset([*PURE_AVM_OPS, *IMPURE_SIDE_EFFECT_FREE_AVM_OPS])
@attrs.define
class SubroutineCollector(visitor.IRTraverser):
subroutines: StableSet[models.Subroutine] = attrs.field(factory=StableSet)
@classmethod
def collect(cls, program: models.Program) -> StableSet[models.Subroutine]:
collector = cls()
collector.visit_subroutine(program.main)
return collector.subroutines
def visit_subroutine(self, subroutine: models.Subroutine) -> None:
if subroutine not in self.subroutines:
self.subroutines.add(subroutine)
self.visit_all_blocks(subroutine.body)
def visit_invoke_subroutine(self, callsub: models.InvokeSubroutine) -> None:
self.visit_subroutine(callsub.target)
def remove_unused_subroutines(program: models.Program) -> bool:
subroutines = SubroutineCollector.collect(program)
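    # the collected set always contains the main routine, which is not part of
    # program.subroutines, hence the 1 + in the size comparison below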
if modified := (len(subroutines) != (1 + len(program.subroutines))):
to_keep = [p for p in program.subroutines if p in subroutines]
for p in program.subroutines:
if p not in subroutines:
logger.debug(f"removing unused subroutine {p.id}")
program.subroutines = to_keep
return modified
def remove_unused_variables(_context: CompileContext, subroutine: models.Subroutine) -> bool:
modified = 0
assignments = dict[tuple[models.BasicBlock, models.Assignment], set[models.Register]]()
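    # unused phis can be removed immediately; assignments are grouped first, since a
    # multi-target assignment can only be removed once *all* of its targets are unused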
for block, op, register in UnusedRegisterCollector.collect(subroutine):
if isinstance(op, models.Assignment):
assignments.setdefault((block, op), set()).add(register)
else:
assert register == op.register
block.phis.remove(op)
logger.debug(f"Removing unused variable {register.local_id}")
modified += 1
for (block, ass), registers in assignments.items():
if registers.symmetric_difference(ass.targets):
pass # some registers still used
elif isinstance(ass.source, models.Value | models.InnerTransactionField) or (
isinstance(ass.source, models.Intrinsic)
and ass.source.op.code in SIDE_EFFECT_FREE_AVM_OPS
):
for reg in sorted(registers, key=lambda r: r.local_id):
logger.debug(f"Removing unused variable {reg.local_id}")
block.ops.remove(ass)
modified += 1
else:
logger.debug(
f"Not removing unused assignment since source is not marked as pure: {ass}"
)
return modified > 0
@attrs.define(kw_only=True)
class UnusedRegisterCollector(visitor.IRTraverser):
used: set[models.Register] = attrs.field(factory=set)
assigned: dict[models.Register, tuple[models.BasicBlock, models.Assignment | models.Phi]] = (
attrs.field(factory=dict)
)
active_block: models.BasicBlock
@classmethod
def collect(
cls, sub: models.Subroutine
) -> Iterable[tuple[models.BasicBlock, models.Assignment | models.Phi, models.Register]]:
collector = cls(active_block=sub.entry)
collector.visit_all_blocks(sub.body)
for reg, (block, ass) in collector.assigned.items():
if reg not in collector.used:
yield block, ass, reg
@typing.override
def visit_block(self, block: models.BasicBlock) -> None:
self.active_block = block
super().visit_block(block)
@typing.override
def visit_assignment(self, ass: models.Assignment) -> None:
for target in ass.targets:
self.assigned[target] = (self.active_block, ass)
ass.source.accept(self)
@typing.override
def visit_phi(self, phi: models.Phi) -> None:
# don't visit phi.register, as this would mean the phi can never be considered unused
for arg in phi.args:
arg.accept(self)
self.assigned[phi.register] = (self.active_block, phi)
@typing.override
def visit_register(self, reg: models.Register) -> None:
self.used.add(reg)
def remove_unreachable_blocks(_context: CompileContext, subroutine: models.Subroutine) -> bool:
reachable_set = frozenset(bfs_block_order(subroutine.entry))
unreachable_blocks = [b for b in subroutine.body if b not in reachable_set]
if not unreachable_blocks:
return False
logger.debug(f"Removing unreachable blocks: {', '.join(map(str, unreachable_blocks))}")
reachable_blocks = []
for block in subroutine.body:
if block in reachable_set:
reachable_blocks.append(block)
if not reachable_set.issuperset(block.successors):
raise InternalError(
f"Block {block} has unreachable successor(s),"
f" but was not marked as unreachable itself"
)
if not reachable_set.issuperset(block.predecessors):
block.predecessors = [b for b in block.predecessors if b in reachable_set]
logger.debug(f"Removed unreachable predecessors from {block}")
UnreachablePhiArgsRemover.apply(unreachable_blocks, reachable_blocks)
subroutine.body = reachable_blocks
return True
@attrs.define
class UnreachablePhiArgsRemover(visitor.IRTraverser):
_unreachable_blocks: Set[models.BasicBlock]
_reachable_blocks: Sequence[models.BasicBlock]
@classmethod
def apply(
cls,
unreachable_blocks: Sequence[models.BasicBlock],
reachable_blocks: Sequence[models.BasicBlock],
) -> None:
collector = cls(frozenset(unreachable_blocks), reachable_blocks)
collector.visit_all_blocks(reachable_blocks)
def visit_phi(self, phi: models.Phi) -> None:
args_to_remove = [a for a in phi.args if a.through in self._unreachable_blocks]
if not args_to_remove:
return
logger.debug(
"Removing unreachable phi arguments: " + ", ".join(sorted(map(str, args_to_remove)))
)
phi.args = [a for a in phi.args if a not in args_to_remove]
if not phi.non_self_args:
raise InternalError(
f"undefined phi created when removing args through "
f"{', '.join(map(str, self._unreachable_blocks))}"
)
TrivialPhiRemover.try_remove(phi, self._reachable_blocks)
|
algorandfoundation/puya
|
src/puya/ir/optimize/dead_code_elimination.py
|
Python
|
NOASSERTION
| 10,570 |
import copy
import itertools
import typing
from collections import defaultdict
from collections.abc import Collection, Iterable, Iterator, Mapping, Sequence
import attrs
import networkx as nx # type: ignore[import-untyped]
from puya import log
from puya.ir import models
from puya.ir.context import TMP_VAR_INDICATOR
from puya.ir.optimize._call_graph import CallGraph
from puya.ir.optimize.context import IROptimizationContext
from puya.ir.optimize.intrinsic_simplification import COMPILE_TIME_CONSTANT_OPS
from puya.ir.visitor import IRTraverser
from puya.ir.visitor_mutator import IRMutator
from puya.utils import lazy_setdefault
logger = log.get_logger(__name__)
def analyse_subroutines_for_inlining(
context: IROptimizationContext,
program: models.Program,
routable_method_ids: Collection[str] | None,
) -> None:
context.inlineable_calls.clear()
context.constant_with_constant_args.clear()
call_graph = CallGraph(program)
for sub in program.subroutines:
if sub.inline is False:
pass # nothing to do
elif any(phi.args for phi in sub.entry.phis):
logger.debug(
f"function has phi node(s) with arguments in entry block: {sub.id}",
location=sub.source_location,
)
if sub.inline is True:
logger.warning(
"function not suitable for inlining due to complex control flow",
location=sub.source_location,
)
sub.inline = False
elif call_graph.is_auto_recursive(sub):
logger.debug(f"function is auto-recursive: {sub.id}", location=sub.source_location)
if sub.inline is True:
logger.warning("unable to inline recursive function", location=sub.source_location)
sub.inline = False
elif sub.inline is True:
for callee_id, _ in call_graph.callees(sub):
if not call_graph.has_path(sub.id, callee_id):
context.inlineable_calls.add((callee_id, sub.id))
else:
logger.warning(
f"not inlining call from {callee_id} to {sub.id}"
f" because call may be re-entrant",
location=sub.source_location,
)
else:
typing.assert_type(sub.inline, None)
if context.options.optimization_level == 0:
return
# for optimization levels below 2, skip auto-inlining routable methods into the approval
# program. mostly this can be beneficial in terms of #ops, but not always.
# also, it impacts the debugging experience
skip_routable_ids = frozenset[str]()
if context.options.optimization_level < 2:
skip_routable_ids = frozenset(routable_method_ids or ())
for sub in program.subroutines:
if sub.inline is None:
if _is_trivial(sub):
# special case for trivial methods, even when routable
logger.debug(f"marking trivial method {sub.id} as inlineable")
for callee_id, _ in call_graph.callees(sub):
# don't need to check re-entrancy here since there's no callsub
context.inlineable_calls.add((callee_id, sub.id))
elif sub.id not in skip_routable_ids:
match call_graph.callees(sub):
case [(callee_id, 1)]:
assert (
callee_id != sub.id
), f"function {sub.id} is auto-recursive and disconnected"
logger.debug(f"marking single-use function {sub.id} for inlining")
context.inlineable_calls.add((callee_id, sub.id))
if context.inlineable_calls:
return
for sub in program.subroutines:
if sub.inline is None and sub.id not in skip_routable_ids:
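            # rough size estimate of the whole body, compared against a threshold that
            # scales with the number of parameters and return values of the subroutine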
complexity = sum(
len(b.phis) + len(b.ops) + len(_not_none(b.terminator).targets()) for b in sub.body
)
threshold = max(3, 1 + len(sub._returns) + len(sub.parameters)) # noqa: SLF001
if complexity <= threshold:
logger.debug(
f"marking simple function {sub.id} for inlining"
f" ({complexity=} <= {threshold=})"
)
for callee_id, _ in call_graph.callees(sub):
if not call_graph.has_path(sub.id, callee_id):
context.inlineable_calls.add((callee_id, sub.id))
def _is_trivial(sub: models.Subroutine) -> bool:
match sub.body:
case [models.BasicBlock(phis=[], ops=[])]:
return True
return False
def perform_subroutine_inlining(
context: IROptimizationContext, subroutine: models.Subroutine
) -> bool:
inline_calls_to = {
to_id for from_id, to_id in context.inlineable_calls if from_id == subroutine.id
}
if not (inline_calls_to or context.options.optimization_level >= 2):
return False
modified = False
blocks_to_visit = subroutine.body.copy()
max_block_id = max(_not_none(block.id) for block in blocks_to_visit)
next_id = itertools.count(max_block_id + 1)
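    # ids for blocks created by inlining (and for the split remainder block) continue
    # from the current maximum block id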
while blocks_to_visit:
block = blocks_to_visit.pop()
for op_index, op in enumerate(block.ops):
match op:
case models.Assignment(
targets=return_targets, source=models.InvokeSubroutine() as call
):
pass
case models.InvokeSubroutine() as call:
return_targets = []
case _:
continue
if call.target.id in inline_calls_to:
logger.debug(
f"inlining call to {call.target.id} in {subroutine.id}",
location=op.source_location,
)
elif (
context.options.optimization_level >= 2
and call.target.inline is None
and all(isinstance(arg, models.Constant) for arg in call.args)
and lazy_setdefault(
context.constant_with_constant_args,
call.target.id,
lambda _: _ConstantFunctionDetector.is_constant_with_constant_args(
call.target
),
)
):
logger.debug(
f"constant function call to {call.target.id} in {subroutine.id}",
location=op.source_location,
)
else:
continue
remainder, created_blocks = _inline_call(
block,
call,
next_id,
op_index,
return_targets,
list(subroutine.get_assigned_registers()),
)
# only visit the remainder of the block, the newly created blocks
# come from a different subroutine, so shouldn't be tested for inlining
            # within the current subroutine
blocks_to_visit.append(remainder)
created_blocks.append(remainder)
idx_after_block = subroutine.body.index(block) + 1
subroutine.body[idx_after_block:idx_after_block] = created_blocks
modified = True
break # we're done with this block, since it's been split
return modified
def _inline_call(
block: models.BasicBlock,
call: models.InvokeSubroutine,
next_id: Iterator[int],
op_index: int,
return_targets: Sequence[models.Register],
host_assigned_registers: list[models.Register],
) -> tuple[models.BasicBlock, list[models.BasicBlock]]:
# make a copy of the entire block graph, and adjust register versions
# to avoid any collisions, as well as updating block IDs
register_offsets = defaultdict[str, int](int)
for host_reg in host_assigned_registers:
register_offsets[host_reg.name] = max(
register_offsets[host_reg.name], host_reg.version + 1
)
new_blocks = _inlined_blocks(call.target, register_offsets)
for new_block in new_blocks:
new_block.id = next(next_id)
# split the block after the callsub instruction
ops_after = block.ops[op_index + 1 :]
del block.ops[op_index:]
terminator_after = block.terminator
assert terminator_after is not None
for succ in terminator_after.unique_targets:
succ.predecessors.remove(block)
inlined_entry = new_blocks[0]
block.terminator = models.Goto(target=inlined_entry, source_location=call.source_location)
inlined_entry.predecessors.append(block)
for phi in inlined_entry.phis:
# TODO: assign undefined and use that as phi arg so we can inline phi nodes with args.
        #       this requires finding a new register though, and it's quite hard to write code
        #       that would trigger it, so leaving it for now
assert not phi.args, "entry phi with arguments should have been prevented from inlining"
inlined_entry.ops.insert(
0,
models.Assignment(
targets=[phi.register],
source=models.Undefined(ir_type=phi.ir_type, source_location=phi.source_location),
source_location=phi.source_location,
),
)
inlined_entry.phis.clear()
# assign parameters before call
for arg, param in zip(call.args, call.target.parameters, strict=True):
updated_param = models.Register(
name=param.name,
ir_type=param.ir_type,
version=param.version + register_offsets[param.name],
source_location=param.source_location,
)
block.ops.append(
models.Assignment(
targets=[updated_param], source=arg, source_location=arg.source_location
)
)
returning_blocks = [
(new_block, new_block.terminator.result)
for new_block in new_blocks
if isinstance(new_block.terminator, models.SubroutineReturn)
]
# create the return block and insert as a predecessor of the original blocks target(s)
remainder = models.BasicBlock(
id=next(next_id),
phis=[],
ops=ops_after,
terminator=terminator_after,
predecessors=[],
comment=f"after_inlined_{call.target.id}",
source_location=block.source_location,
)
for succ in remainder.successors:
for phi in succ.phis:
for phi_arg in phi.args:
if phi_arg.through == block:
phi_arg.through = remainder
succ.predecessors.append(remainder)
# replace inlined retsubs with unconditional branches to the second block half
for new_block, _ in returning_blocks:
new_block.terminator = models.Goto(target=remainder, source_location=call.source_location)
remainder.predecessors.append(new_block)
num_returns = len(returning_blocks)
if num_returns == 1:
# if there is a single retsub, we can assign to the return variables in that block
# directly without violating SSA
((new_block, return_values),) = returning_blocks
if return_targets:
new_block.ops.append(
models.Assignment(
targets=return_targets,
source=models.ValueTuple(values=return_values, source_location=None),
source_location=None,
)
)
elif num_returns > 1:
        # otherwise, when there's more than one retsub block,
# return value(s) become phi node(s) in the second block half
return_phis = [models.Phi(register=ret_target) for ret_target in return_targets]
for new_block_idx, (new_block, return_values) in enumerate(returning_blocks):
for ret_idx, ret_phi in enumerate(return_phis):
ret_value = return_values[ret_idx]
if not isinstance(ret_value, models.Register):
tmp_value = ret_value
tmp_reg_name = f"{call.target.id}{TMP_VAR_INDICATOR}{ret_idx}"
ret_value = models.Register(
ir_type=ret_value.ir_type,
source_location=ret_value.source_location,
name=tmp_reg_name,
version=new_block_idx + register_offsets[tmp_reg_name],
)
new_block.ops.append(
models.Assignment(
targets=[ret_value],
source=tmp_value,
source_location=tmp_value.source_location,
)
)
ret_phi.args.append(models.PhiArgument(value=ret_value, through=new_block))
remainder.phis = return_phis
return remainder, new_blocks
def _not_none[T](x: T | None) -> T:
assert x is not None
return x
def _inlined_blocks(
sub: models.Subroutine, register_offsets: Mapping[str, int]
) -> list[models.BasicBlock]:
ref_collector = _SubroutineReferenceCollector()
ref_collector.visit_all_blocks(sub.body)
memo = {id(s): s for s in ref_collector.subroutines}
blocks = copy.deepcopy(sub.body, memo=memo)
_OffsetRegisterVersions.apply(blocks, register_offsets=register_offsets)
return blocks
@attrs.define
class _OffsetRegisterVersions(IRMutator):
register_offsets: Mapping[str, int]
@classmethod
def apply(
cls, blocks: Iterable[models.BasicBlock], *, register_offsets: Mapping[str, int]
) -> None:
replacer = cls(register_offsets=register_offsets)
for block in blocks:
replacer.visit_block(block)
def visit_register(self, reg: models.Register) -> models.Register:
return models.Register(
name=reg.name,
ir_type=reg.ir_type,
version=reg.version + self.register_offsets[reg.name],
source_location=reg.source_location,
)
@attrs.define
class _SubroutineReferenceCollector(IRTraverser):
subroutines: set[models.Subroutine] = attrs.field(factory=set)
@typing.override
def visit_invoke_subroutine(self, callsub: models.InvokeSubroutine) -> None:
self.subroutines.add(callsub.target)
super().visit_invoke_subroutine(callsub)
class _NonConstantFunctionError(Exception):
pass
class _ConstantFunctionDetector(IRTraverser):
def __init__(self) -> None:
self._block_graph = nx.DiGraph()
def has_cycle(self, *, start_from: int | None = None) -> bool:
try:
nx.find_cycle(self._block_graph, source=start_from)
except nx.NetworkXNoCycle:
return False
else:
return True
@classmethod
def is_constant_with_constant_args(cls, sub: models.Subroutine) -> bool:
"""Detect if a function is constant assuming all argument are constants"""
visitor = cls()
try:
visitor.visit_all_blocks(sub.body)
except _NonConstantFunctionError:
return False
return not visitor.has_cycle(start_from=sub.entry.id)
@typing.override
def visit_block(self, block: models.BasicBlock) -> None:
super().visit_block(block)
self._block_graph.add_node(block.id)
for target in block.successors:
self._block_graph.add_edge(block.id, target.id)
@typing.override
def visit_invoke_subroutine(self, callsub: models.InvokeSubroutine) -> None:
raise _NonConstantFunctionError
@typing.override
def visit_template_var(self, deploy_var: models.TemplateVar) -> None:
raise _NonConstantFunctionError
@typing.override
def visit_compiled_contract_reference(self, const: models.CompiledContractReference) -> None:
raise _NonConstantFunctionError
@typing.override
def visit_compiled_logicsig_reference(self, const: models.CompiledLogicSigReference) -> None:
raise _NonConstantFunctionError
@typing.override
def visit_intrinsic_op(self, intrinsic: models.Intrinsic) -> None:
if intrinsic.op.code not in COMPILE_TIME_CONSTANT_OPS:
raise _NonConstantFunctionError
super().visit_intrinsic_op(intrinsic)
@typing.override
def visit_itxn_constant(self, const: models.ITxnConstant) -> None:
raise _NonConstantFunctionError
@typing.override
def visit_inner_transaction_field(self, field: models.InnerTransactionField) -> None:
raise _NonConstantFunctionError
|
algorandfoundation/puya
|
src/puya/ir/optimize/inlining.py
|
Python
|
NOASSERTION
| 16,636 |
import attrs
from puya.context import CompileContext
from puya.ir import models
from puya.ir.avm_ops import AVMOp
from puya.ir.visitor_mutator import IRMutator
def inner_txn_field_replacer(_context: CompileContext, subroutine: models.Subroutine) -> bool:
modified = IntrinsicFieldReplacer.apply(to=subroutine)
return modified > 0
@attrs.define
class IntrinsicFieldReplacer(IRMutator):
modified: int = 0
@classmethod
def apply(cls, to: models.Subroutine) -> int:
replacer = cls()
for block in to.body:
replacer.visit_block(block)
return replacer.modified
def visit_inner_transaction_field(
self, field: models.InnerTransactionField
) -> models.InnerTransactionField | models.Intrinsic:
match field.group_index:
case models.ITxnConstant(value=group_index):
pass
case _:
return field
match field.is_last_in_group:
case models.UInt64Constant(value=is_last_in_group):
pass
case _:
is_last_in_group = 0
self.modified += 1
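        # fields of the last submitted inner transaction (group) can use itxn/itxnas,
        # which implicitly refer to that group; otherwise gitxn/gitxnas with an explicit
        # group index immediate is required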
return (
models.Intrinsic(
op=AVMOp.itxnas if field.array_index else AVMOp.itxn,
immediates=[field.field],
args=[field.array_index] if field.array_index else [],
source_location=field.source_location,
)
if is_last_in_group
else models.Intrinsic(
op=AVMOp.gitxnas if field.array_index else AVMOp.gitxn,
immediates=[group_index, field.field],
args=[field.array_index] if field.array_index else [],
source_location=field.source_location,
)
)
|
algorandfoundation/puya
|
src/puya/ir/optimize/inner_txn.py
|
Python
|
NOASSERTION
| 1,774 |
import base64
import contextlib
import math
import operator
import typing
from collections import defaultdict, deque
from collections.abc import Callable, Container, Generator, Iterable, Mapping, Sequence, Set
from itertools import zip_longest
import attrs
from puya import algo_constants, log
from puya.avm import AVMType
from puya.ir import models
from puya.ir.avm_ops import AVMOp
from puya.ir.models import Intrinsic, UInt64Constant
from puya.ir.optimize.context import IROptimizationContext
from puya.ir.optimize.dead_code_elimination import SIDE_EFFECT_FREE_AVM_OPS
from puya.ir.register_read_collector import RegisterReadCollector
from puya.ir.types_ import AVMBytesEncoding, IRType
from puya.ir.visitor_mutator import IRMutator
from puya.parse import SourceLocation
from puya.utils import biguint_bytes_eval, method_selector_hash, set_add
logger = log.get_logger(__name__)
_AnyOp = models.Op | models.ControlOp | models.Phi
_RegisterAssignments = Mapping[models.Register, models.Assignment]
COMPILE_TIME_CONSTANT_OPS = frozenset(
[
# "generic" comparison ops
"==",
"!=",
# uint64 comparison ops
"<",
"<=",
">",
">=",
# boolean ops
"!",
"&&",
"||",
# uint64 bitwise ops
"&",
"|",
"^",
"~",
"shl",
"shr",
# uint64 math
"+",
"-",
"*",
"/",
"%",
"exp",
"sqrt",
# wide math
"addw",
"mulw",
"divw",
"expw",
"divmodw",
# bit/byte ops
"concat",
"extract",
"extract3",
"getbit",
"getbyte",
"len",
"replace2",
"replace3",
"setbit",
"setbyte",
"substring",
"substring3",
# conversion
"itob",
"btoi",
"extract_uint16",
"extract_uint32",
"extract_uint64",
# byte math
"b+",
"b-",
"b*",
"b/",
"b%",
"bsqrt",
        # byte comparison ops
"b==",
"b!=",
"b<",
"b<=",
"b>",
"b>=",
# byte bitwise ops
"b&",
"b|",
"b^",
"b~",
# misc
"assert",
"bzero",
"select",
"bitlen",
# ! unimplemented for constant arg evaluation
"base64_decode",
"ec_add",
"ec_map_to",
"ec_multi_scalar_mul",
"ec_pairing_check",
"ec_scalar_mul",
"ec_subgroup_check",
"ecdsa_pk_decompress",
"ecdsa_pk_recover",
"ecdsa_verify",
"ed25519verify",
"ed25519verify_bare",
"json_ref",
"keccak256",
"sha256",
"sha3_256",
"sha512_256",
"vrf_verify",
"sumhash512", # AVM 11
]
)
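# when expand_all_bytes is disabled, itob and bzero results are not folded into byte
# constants directly (see the special-case handling of these ops in intrinsic_simplifier)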
_CONSTANT_EVALUABLE: typing.Final[frozenset[str]] = COMPILE_TIME_CONSTANT_OPS - {
AVMOp.itob.code,
AVMOp.bzero.code,
}
def intrinsic_simplifier(context: IROptimizationContext, subroutine: models.Subroutine) -> bool:
if context.expand_all_bytes:
work_list = _AssignmentWorkQueue(COMPILE_TIME_CONSTANT_OPS)
else:
work_list = _AssignmentWorkQueue(_CONSTANT_EVALUABLE)
ssa_reads = _SSAReadTracker()
register_assignments = dict[models.Register, models.Assignment]()
for block in subroutine.body:
for op in block.all_ops:
ssa_reads.add(op)
if isinstance(op, models.Assignment):
work_list.enqueue(op)
if len(op.targets) == 1:
(target,) = op.targets
register_assignments[target] = op
modified = 0
while work_list:
ass, source = work_list.dequeue()
simplified = _try_fold_intrinsic(register_assignments, source)
if simplified is not None:
logger.debug(f"Simplified {source} to {simplified}")
with ssa_reads.update(ass):
ass.source = simplified
modified += 1
# if it became a Value, propagate to any assignment-readers and add to work list
if isinstance(simplified, models.Value):
(target,) = ass.targets
replacer = _RegisterValueReplacer(register=target, replacement=simplified)
for target_read in ssa_reads.get(target, copy=True):
if isinstance(target_read, models.Assignment):
work_list.enqueue(target_read)
# special case for indirection of otherwise non-inlined constants
match target_read:
case models.Assignment(
targets=[target_read_target],
source=models.Intrinsic(op=(AVMOp.bzero | AVMOp.itob)),
) if not context.expand_all_bytes:
for indirect_target_read in ssa_reads.get(target_read_target):
if isinstance(indirect_target_read, models.Assignment):
work_list.enqueue(indirect_target_read)
with ssa_reads.update(target_read):
target_read.accept(replacer)
modified += replacer.modified
else:
typing.assert_type(simplified, models.Intrinsic)
# source is still an intrinsic, add it back to the work list
work_list.enqueue(ass)
# add any assignment-readers to the work list
for target in ass.targets:
for target_read in ssa_reads.get(target):
if isinstance(target_read, models.Assignment):
work_list.enqueue(target_read)
for block in subroutine.body:
for op in block.ops:
if (
isinstance(op, models.Assignment)
and isinstance(op.source, models.Intrinsic)
and op.source.args
):
with_immediates = _try_convert_stack_args_to_immediates(op.source)
if with_immediates is not None:
logger.debug(f"Simplified {op.source} to {with_immediates}")
op.source = with_immediates
modified += 1
register_intrinsics = {
target: ass.source
for target, ass in register_assignments.items()
if isinstance(ass.source, models.Intrinsic)
}
modified += _simplify_conditional_branches(subroutine, register_intrinsics)
modified += _simplify_non_returning_intrinsics(subroutine, register_intrinsics)
return modified > 0
class _AssignmentWorkQueue:
def __init__(self, constant_evaluable: Container[str]) -> None:
self._constant_evaluable = constant_evaluable
self._dq = deque[tuple[models.Assignment, models.Intrinsic]]()
self._set = set[Sequence[models.Register]]()
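        # mirrors the deque contents, keyed by assignment targets, so the same assignment
        # is never queued more than once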
def enqueue(self, op: models.Assignment) -> bool:
if (
isinstance(op.source, models.Intrinsic)
and op.source.op.code in self._constant_evaluable
and set_add(self._set, op.targets)
):
self._dq.append((op, op.source))
return True
return False
def dequeue(self) -> tuple[models.Assignment, models.Intrinsic]:
op, source = self._dq.popleft()
assert source is op.source
self._set.remove(op.targets)
return op, source
    def __bool__(self) -> bool:
return bool(self._dq)
class _SSAReadTracker:
def __init__(self) -> None:
self._data = defaultdict[models.Register, set[_AnyOp]](set)
def add(self, op: _AnyOp) -> None:
for read_reg in self._register_reads(op):
self._data[read_reg].add(op)
def get(self, reg: models.Register, *, copy: bool = False) -> Iterable[_AnyOp]:
reads = self._data.get(reg)
if reads is None:
return ()
if copy:
return reads.copy()
return reads
@contextlib.contextmanager
def update(self, op: _AnyOp) -> Generator[None, None, None]:
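        # context manager: capture the op's register reads before the caller mutates it,
        # then reconcile the read index with the new reads afterwards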
old_reads = self._register_reads(op)
yield
new_reads = self._register_reads(op)
for removed_read in old_reads - new_reads:
self._data[removed_read].remove(op)
for added_read in new_reads - old_reads:
self._data[added_read].add(op)
@staticmethod
def _register_reads(visitable: models.IRVisitable) -> Set[models.Register]:
collector = RegisterReadCollector()
visitable.accept(collector)
return collector.used_registers
@attrs.define(kw_only=True)
class _RegisterValueReplacer(IRMutator):
register: models.Register
replacement: models.Value
modified: int = 0
@typing.override
def visit_assignment(self, ass: models.Assignment) -> models.Assignment:
# don't visit target(s), needs to stay as Register
ass.source = ass.source.accept(self)
return ass
@typing.override
def visit_phi(self, phi: models.Phi) -> models.Phi:
# don't visit phi nodes, needs to stay as Register
return phi
@typing.override
def visit_register(self, reg: models.Register) -> models.Value: # type: ignore[override]
if reg != self.register:
return reg
self.modified += 1
return self.replacement
def _simplify_conditional_branches(
subroutine: models.Subroutine, register_intrinsics: Mapping[models.Register, models.Intrinsic]
) -> int:
modified = 0
branch_registers = dict[
models.Register, list[tuple[models.ConditionalBranch, models.BasicBlock]]
]()
for block in subroutine.body:
match block.terminator:
case (
models.ConditionalBranch(condition=models.Register() as cond) as branch
) if cond in register_intrinsics:
branch_registers.setdefault(cond, []).append((branch, block))
for target, usages in branch_registers.items():
intrinsic = register_intrinsics[target]
cond_maybe_simplified = _try_simplify_bool_intrinsic(intrinsic)
if cond_maybe_simplified is not None:
for branch, used_block in usages:
used_block.terminator = attrs.evolve(branch, condition=cond_maybe_simplified)
modified += 1
return modified
def _simplify_non_returning_intrinsics(
subroutine: models.Subroutine, register_intrinsics: Mapping[models.Register, models.Intrinsic]
) -> int:
modified = 0
for block in subroutine.body:
ops = []
for op in block.ops:
if not isinstance(op, models.Intrinsic):
ops.append(op)
else:
result = _visit_intrinsic_op(op, register_intrinsics)
if result is not op:
modified += 1
if result is not None:
ops.append(result)
block.ops[:] = ops
return modified
def _visit_intrinsic_op(
intrinsic: Intrinsic, register_intrinsics: Mapping[models.Register, models.Intrinsic]
) -> Intrinsic | None:
# if we get here, it means either the intrinsic doesn't have a return or it's ignored,
# in either case, the result has to be either an Op or None (ie delete),
# so we don't invoke _try_fold_intrinsic here
if intrinsic.op == AVMOp.assert_:
(cond,) = intrinsic.args
if isinstance(cond, models.UInt64Constant):
value = cond.value
if value:
return None
else:
# an assert 0 could be simplified to an err, but
# this would make it a ControlOp, so the block would
# need to be restructured
pass
elif cond in register_intrinsics:
cond_op = register_intrinsics[cond] # type: ignore[index]
assert_cond_maybe_simplified = _try_simplify_bool_intrinsic(cond_op)
if assert_cond_maybe_simplified is not None:
return attrs.evolve(intrinsic, args=[assert_cond_maybe_simplified])
return intrinsic
elif intrinsic.op == AVMOp.itxn_field:
(field_im,) = intrinsic.immediates
if field_im in ("ApprovalProgramPages", "ClearStateProgramPages"):
(page_value,) = intrinsic.args
if isinstance(page_value, models.BytesConstant) and page_value.value == b"":
return None
return intrinsic
elif intrinsic.op.code in SIDE_EFFECT_FREE_AVM_OPS:
logger.debug(f"Removing unused pure op {intrinsic}")
return None
elif intrinsic.args:
simplified = _try_convert_stack_args_to_immediates(intrinsic)
if simplified is not None:
return simplified
else:
return intrinsic
else:
return intrinsic
def _try_simplify_bool_condition(
register_assignments: _RegisterAssignments, cond: models.Value
) -> models.Value | None:
if cond in register_assignments:
cond_defn = register_assignments[cond] # type: ignore[index]
return _try_simplify_bool_intrinsic(cond_defn.source)
return None
def _try_simplify_bool_intrinsic(cond_op: models.ValueProvider) -> models.Value | None:
match cond_op:
case (
models.Intrinsic(
args=[
models.Value(atype=AVMType.uint64) as a,
models.Value(atype=AVMType.uint64) as b,
]
) as intrinsic
):
cond_maybe_simplified = _try_simplify_uint64_binary_op(
intrinsic, a, b, bool_context=True
)
if isinstance(cond_maybe_simplified, models.Value):
return cond_maybe_simplified
return None
def _try_convert_stack_args_to_immediates(intrinsic: Intrinsic) -> Intrinsic | None:
match intrinsic:
case Intrinsic(
op=AVMOp.gitxnas,
args=[models.UInt64Constant(value=array_index)],
immediates=[group_index, field],
):
return attrs.evolve(
intrinsic,
op=AVMOp.gitxna,
args=[],
immediates=[group_index, field, array_index],
)
case Intrinsic(
op=AVMOp.itxnas,
args=[models.UInt64Constant(value=array_index)],
immediates=[field],
):
return attrs.evolve(
intrinsic,
op=AVMOp.itxna,
args=[],
immediates=[field, array_index],
)
case Intrinsic(
op=(AVMOp.loads | AVMOp.stores as op),
args=[models.UInt64Constant(value=slot), *rest],
):
return attrs.evolve(
intrinsic,
immediates=[slot],
args=rest,
op=AVMOp.load if op == AVMOp.loads else AVMOp.store,
)
case Intrinsic(
op=(AVMOp.extract3 | AVMOp.extract),
args=[
models.Value(atype=AVMType.bytes),
models.UInt64Constant(value=S),
models.UInt64Constant(value=L),
],
) if S <= 255 and 1 <= L <= 255:
            # note the lower bound of 1 on length: extract with immediates and extract3
            # have *very* different behaviour if the length is 0
return attrs.evolve(
intrinsic, immediates=[S, L], args=intrinsic.args[:1], op=AVMOp.extract
)
case Intrinsic(
op=AVMOp.substring3,
args=[
models.Value(atype=AVMType.bytes),
models.UInt64Constant(value=S),
models.UInt64Constant(value=E),
],
) if S <= 255 and E <= 255:
return attrs.evolve(
intrinsic, immediates=[S, E], args=intrinsic.args[:1], op=AVMOp.substring
)
case Intrinsic(
op=AVMOp.replace3,
args=[a, models.UInt64Constant(value=S), b],
) if S <= 255:
return attrs.evolve(intrinsic, immediates=[S], args=[a, b], op=AVMOp.replace2)
case Intrinsic(
op=AVMOp.args,
args=[models.UInt64Constant(value=idx)],
) if idx <= 255:
return attrs.evolve(intrinsic, op=AVMOp.arg, immediates=[idx], args=[])
return None
def _try_fold_intrinsic(
register_assignments: _RegisterAssignments, intrinsic: models.Intrinsic
) -> models.Value | models.Intrinsic | None:
op_loc = intrinsic.source_location
if intrinsic.op is AVMOp.select:
false, true, selector = intrinsic.args
selector_const = _get_int_constant(selector)
if selector_const is not None:
return true if selector_const else false
maybe_simplified_select_cond = _try_simplify_bool_condition(register_assignments, selector)
if maybe_simplified_select_cond is not None:
return attrs.evolve(intrinsic, args=[false, true, maybe_simplified_select_cond])
if false == true:
return true
match (
_get_byte_constant(register_assignments, false),
_get_byte_constant(register_assignments, true),
):
case (None, _) | (_, None):
pass
case (
models.BytesConstant(value=false_value),
models.BytesConstant(value=true_value) as true_bytes_const,
) if false_value == true_value:
return true_bytes_const
match _get_int_constant(false), _get_int_constant(true):
case (None, _) | (_, None):
pass
case 1, 0:
return attrs.evolve(intrinsic, op=AVMOp.not_, args=[selector])
case 0, int(true_int_value) if selector.ir_type == IRType.bool:
if true_int_value == 1:
return selector
return attrs.evolve(intrinsic, op=AVMOp.mul, args=[selector, true])
case 0, 1:
zero_const = UInt64Constant(value=0, source_location=intrinsic.source_location)
return attrs.evolve(intrinsic, op=AVMOp.neq, args=[selector, zero_const])
elif intrinsic.op is AVMOp.replace2:
(start,) = intrinsic.immediates
assert isinstance(start, int)
byte_arg_a, byte_arg_b = intrinsic.args
if (byte_const_a := _get_byte_constant(register_assignments, byte_arg_a)) is not None and (
byte_const_b := _get_byte_constant(register_assignments, byte_arg_b)
) is not None:
replaced = bytearray(byte_const_a.value)
replaced[start : start + len(byte_const_b.value)] = byte_const_b.value
return models.BytesConstant(
value=bytes(replaced),
encoding=_choose_encoding(byte_const_a.encoding, byte_const_b.encoding),
source_location=op_loc,
)
elif intrinsic.op is AVMOp.replace3:
byte_arg_a, start_s, byte_arg_b = intrinsic.args
if (
(start2 := _get_int_constant(start_s)) is not None
and (byte_const_a := _get_byte_constant(register_assignments, byte_arg_a)) is not None
and (byte_const_b := _get_byte_constant(register_assignments, byte_arg_b)) is not None
):
replaced = bytearray(byte_const_a.value)
replaced[start2 : start2 + len(byte_const_b.value)] = byte_const_b.value
return models.BytesConstant(
value=bytes(replaced),
encoding=_choose_encoding(byte_const_a.encoding, byte_const_b.encoding),
source_location=op_loc,
)
elif intrinsic.op is AVMOp.getbit:
match intrinsic.args:
case [
models.UInt64Constant(value=source, ir_type=IRType.uint64),
models.UInt64Constant(value=index),
]:
getbit_result = 1 if (source & (1 << index)) else 0
return models.UInt64Constant(value=getbit_result, source_location=op_loc)
case [
models.Value(atype=AVMType.bytes) as byte_arg,
models.UInt64Constant(value=index),
] if (byte_const := _get_byte_constant(register_assignments, byte_arg)) is not None:
binary_array = [
x for xs in [bin(bb)[2:].zfill(8) for bb in byte_const.value] for x in xs
]
                try:
                    the_bit = binary_array[index]
                except IndexError:
                    return None  # would fail at runtime
                return models.UInt64Constant(source_location=op_loc, value=int(the_bit))
elif intrinsic.op is AVMOp.setbit:
match intrinsic.args:
case [
models.UInt64Constant(value=source, ir_type=IRType.uint64),
models.UInt64Constant(value=index),
models.UInt64Constant(value=value),
]:
if value:
setbit_result = source | (1 << index)
else:
setbit_result = source & ~(1 << index)
return models.UInt64Constant(value=setbit_result, source_location=op_loc)
case [
models.Value(atype=AVMType.bytes) as byte_arg,
models.UInt64Constant(value=index),
models.UInt64Constant(value=value),
] if (byte_const := _get_byte_constant(register_assignments, byte_arg)) is not None:
binary_array = [
x for xs in [bin(bb)[2:].zfill(8) for bb in byte_const.value] for x in xs
]
try:
binary_array[index] = "1" if value else "0"
except IndexError:
return None # would fail at runtime
binary_string = "".join(binary_array)
adjusted_const_value = int(binary_string, 2).to_bytes(
len(byte_const.value), byteorder="big"
)
return models.BytesConstant(
source_location=op_loc,
encoding=byte_const.encoding,
value=adjusted_const_value,
)
case [a, b, c]:
bool_arg_maybe_simplified = _try_simplify_bool_condition(register_assignments, c)
if bool_arg_maybe_simplified is not None:
return attrs.evolve(intrinsic, args=[a, b, bool_arg_maybe_simplified])
elif intrinsic.op.code.startswith("extract_uint"):
match intrinsic.args:
case [
models.BytesConstant(value=bytes_value),
models.UInt64Constant(value=offset),
]:
bit_size = int(intrinsic.op.code.removeprefix("extract_uint"))
byte_size = bit_size // 8
extracted = bytes_value[offset : offset + byte_size]
if len(extracted) != byte_size:
                    return None  # would fail at runtime, let's hope this is unreachable 😬
uint64_result = int.from_bytes(extracted, byteorder="big", signed=False)
return models.UInt64Constant(
value=uint64_result,
source_location=op_loc,
)
elif intrinsic.op is AVMOp.concat:
left_arg, right_arg = intrinsic.args
left_const = _get_byte_constant(register_assignments, left_arg)
right_const = _get_byte_constant(register_assignments, right_arg)
if left_const is not None:
if left_const.value == b"":
return right_arg
if right_const is not None:
# two constants, just fold
target_encoding = _choose_encoding(left_const.encoding, right_const.encoding)
result_value = left_const.value + right_const.value
result = models.BytesConstant(
value=result_value,
encoding=target_encoding,
source_location=op_loc,
)
return result
elif right_const is not None:
if right_const.value == b"":
return left_arg
elif intrinsic.op.code.startswith("extract"):
match intrinsic:
case (
models.Intrinsic(
immediates=[int(S), int(L)],
args=[byte_arg],
)
| models.Intrinsic(
immediates=[],
args=[
byte_arg,
models.UInt64Constant(value=S),
models.UInt64Constant(value=L),
],
)
) if (byte_const := _get_byte_constant(register_assignments, byte_arg)) is not None:
                # note there is a difference in behaviour between extract with stack args
                # and with immediates - a length of zero means "to the end" with immediates,
                # but a zero-length result with stack args
if intrinsic.immediates and L == 0:
extracted = byte_const.value[S:]
else:
extracted = byte_const.value[S : S + L]
return models.BytesConstant(
source_location=op_loc, encoding=byte_const.encoding, value=extracted
)
elif intrinsic.op.code.startswith("substring"):
match intrinsic:
case (
models.Intrinsic(
immediates=[int(S), int(E)],
args=[byte_arg],
)
| models.Intrinsic(
immediates=[],
args=[
byte_arg,
models.UInt64Constant(value=S),
models.UInt64Constant(value=E),
],
)
) if (byte_const := _get_byte_constant(register_assignments, byte_arg)) is not None:
if E < S:
                    return None  # would fail at runtime, let's hope this is unreachable 😬
extracted = byte_const.value[S:E]
return models.BytesConstant(
source_location=op_loc, encoding=byte_const.encoding, value=extracted
)
elif not intrinsic.immediates:
match intrinsic.args:
case [models.Value(atype=AVMType.uint64) as x]:
return _try_simplify_uint64_unary_op(intrinsic, x)
case [
models.Value(atype=AVMType.uint64) as a,
models.Value(atype=AVMType.uint64) as b,
]:
return _try_simplify_uint64_binary_op(intrinsic, a, b)
case [models.Value(atype=AVMType.bytes) as x]:
return _try_simplify_bytes_unary_op(register_assignments, intrinsic, x)
case [
models.Value(atype=AVMType.bytes) as a,
models.Value(atype=AVMType.bytes) as b,
]:
return _try_simplify_bytes_binary_op(register_assignments, intrinsic, a, b)
return None
def _get_int_constant(value: models.Value) -> int | None:
if isinstance(value, models.UInt64Constant):
return value.value
return None
def _get_biguint_constant(
register_assignments: _RegisterAssignments, value: models.Value
) -> tuple[int, bytes, AVMBytesEncoding] | tuple[None, None, None]:
if isinstance(value, models.BigUIntConstant):
return value.value, biguint_bytes_eval(value.value), AVMBytesEncoding.base16
byte_const = _get_byte_constant(register_assignments, value)
if byte_const is not None and len(byte_const.value) <= 64:
return (
int.from_bytes(byte_const.value, byteorder="big", signed=False),
byte_const.value,
byte_const.encoding,
)
return None, None, None
def _byte_wise(op: Callable[[int, int], int], lhs: bytes, rhs: bytes) -> bytes:
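    # right-align both operands and apply op byte-by-byte, zero-extending the shorter
    # operand on the left - this mirrors the AVM's b&/b|/b^ behaviour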
return bytes([op(a, b) for a, b in zip_longest(lhs[::-1], rhs[::-1], fillvalue=0)][::-1])
def _choose_encoding(a: AVMBytesEncoding, b: AVMBytesEncoding) -> AVMBytesEncoding:
if a == b:
# preserve encoding if both equal
return a
# exclude utf8 from known choices, we don't preserve that encoding choice unless
# they're both utf8 strings, which is covered by the first check
known_binary_choices = {a, b} - {AVMBytesEncoding.utf8, AVMBytesEncoding.unknown}
if not known_binary_choices:
return AVMBytesEncoding.unknown
# pick the most compact encoding of the known binary encodings
if AVMBytesEncoding.base64 in known_binary_choices:
return AVMBytesEncoding.base64
if AVMBytesEncoding.base32 in known_binary_choices:
return AVMBytesEncoding.base32
return AVMBytesEncoding.base16
def _decode_address(address: str) -> bytes:
# Pad address so it's a valid b32 string
padded_address = address + (6 * "=")
address_bytes = base64.b32decode(padded_address)
public_key_hash = address_bytes[: algo_constants.PUBLIC_KEY_HASH_LENGTH]
return public_key_hash
def _get_byte_constant(
register_assignments: _RegisterAssignments, byte_arg: models.Value
) -> models.BytesConstant | None:
if byte_arg in register_assignments:
byte_arg_defn = register_assignments[byte_arg] # type: ignore[index]
match byte_arg_defn.source:
case models.Intrinsic(op=AVMOp.itob, args=[models.UInt64Constant(value=itob_arg)]):
return _eval_itob(itob_arg, byte_arg_defn.source_location)
case models.Intrinsic(op=AVMOp.bzero, args=[models.UInt64Constant(value=bzero_arg)]):
return _eval_bzero(bzero_arg, byte_arg_defn.source_location)
case models.Intrinsic(op=AVMOp.global_, immediates=["ZeroAddress"]):
return models.BytesConstant(
value=_decode_address(algo_constants.ZERO_ADDRESS),
encoding=AVMBytesEncoding.base32,
source_location=byte_arg.source_location,
)
elif isinstance(byte_arg, models.Constant):
if isinstance(byte_arg, models.BytesConstant):
return byte_arg
if isinstance(byte_arg, models.BigUIntConstant):
return models.BytesConstant(
value=biguint_bytes_eval(byte_arg.value),
encoding=AVMBytesEncoding.base16,
source_location=byte_arg.source_location,
)
if isinstance(byte_arg, models.AddressConstant):
return models.BytesConstant(
value=_decode_address(byte_arg.value),
encoding=AVMBytesEncoding.base32,
source_location=byte_arg.source_location,
)
if isinstance(byte_arg, models.MethodConstant):
return models.BytesConstant(
value=method_selector_hash(byte_arg.value),
encoding=AVMBytesEncoding.base16,
source_location=byte_arg.source_location,
)
return None
def _eval_itob(arg: int, loc: SourceLocation | None) -> models.BytesConstant:
return models.BytesConstant(
value=arg.to_bytes(8, byteorder="big", signed=False),
encoding=AVMBytesEncoding.base16,
source_location=loc,
)
def _eval_bzero(arg: int, loc: SourceLocation | None) -> models.BytesConstant | None:
if arg <= 64:
return models.BytesConstant(
value=b"\x00" * arg,
encoding=AVMBytesEncoding.base16,
source_location=loc,
)
return None
def _try_simplify_uint64_unary_op(
intrinsic: models.Intrinsic, arg: models.Value
) -> models.Value | None:
op_loc = intrinsic.source_location
x = _get_int_constant(arg)
if x is not None:
if intrinsic.op is AVMOp.not_:
not_x = 0 if x else 1
return models.UInt64Constant(value=not_x, source_location=op_loc)
elif intrinsic.op is AVMOp.bitwise_not:
inverted = x ^ 0xFFFFFFFFFFFFFFFF
return models.UInt64Constant(value=inverted, source_location=op_loc)
elif intrinsic.op is AVMOp.sqrt:
value = math.isqrt(x)
return models.UInt64Constant(value=value, source_location=op_loc)
elif intrinsic.op is AVMOp.bitlen:
return UInt64Constant(value=x.bit_length(), source_location=op_loc)
elif intrinsic.op is AVMOp.itob:
return _eval_itob(x, op_loc)
elif intrinsic.op is AVMOp.bzero:
return _eval_bzero(x, op_loc)
else:
logger.debug(f"Don't know how to simplify {intrinsic.op.code} of {x}")
return None
def _try_simplify_bytes_unary_op(
register_assignments: _RegisterAssignments, intrinsic: models.Intrinsic, arg: models.Value
) -> models.Value | None:
op_loc = intrinsic.source_location
if intrinsic.op is AVMOp.bsqrt:
biguint_const, _, _ = _get_biguint_constant(register_assignments, arg)
if biguint_const is not None:
value = math.isqrt(biguint_const)
return models.BigUIntConstant(value=value, source_location=op_loc)
else:
byte_const = _get_byte_constant(register_assignments, arg)
if byte_const is not None:
if intrinsic.op is AVMOp.bitwise_not_bytes:
inverted = bytes([x ^ 0xFF for x in byte_const.value])
return models.BytesConstant(
value=inverted, encoding=byte_const.encoding, source_location=op_loc
)
elif intrinsic.op is AVMOp.btoi:
converted = int.from_bytes(byte_const.value, byteorder="big", signed=False)
return models.UInt64Constant(value=converted, source_location=op_loc)
elif intrinsic.op is AVMOp.len_:
length = len(byte_const.value)
return models.UInt64Constant(value=length, source_location=op_loc)
elif intrinsic.op is AVMOp.bitlen:
converted = int.from_bytes(byte_const.value, byteorder="big", signed=False)
return UInt64Constant(value=converted.bit_length(), source_location=op_loc)
else:
logger.debug(f"Don't know how to simplify {intrinsic.op.code} of {byte_const}")
return None
def _try_simplify_uint64_binary_op(
intrinsic: models.Intrinsic, a: models.Value, b: models.Value, *, bool_context: bool = False
) -> models.Value | models.Intrinsic | None:
op = intrinsic.op
c: models.Value | int | None = None
if a == b:
match op:
case AVMOp.sub:
c = 0
case AVMOp.eq | AVMOp.lte | AVMOp.gte:
c = 1
case AVMOp.neq | AVMOp.lt | AVMOp.gt:
c = 0
case AVMOp.div_floor:
c = 1
case AVMOp.bitwise_xor:
c = 0
case AVMOp.bitwise_and | AVMOp.bitwise_or:
c = a
if c is None:
a_const = _get_int_constant(a)
b_const = _get_int_constant(b)
# a >= 0 <-> 1
if b_const == 0 and op == AVMOp.gte: # noqa: SIM114
c = 1
# 0 <= b <-> 1
elif a_const == 0 and op == AVMOp.lte:
c = 1
elif a_const == 1 and op == AVMOp.mul:
c = b
elif b_const == 1 and op == AVMOp.mul:
c = a
elif a_const == 0 and op in (AVMOp.add, AVMOp.or_):
c = b
elif b_const == 0 and op in (AVMOp.add, AVMOp.sub, AVMOp.or_):
c = a
elif 0 in (a_const, b_const) and op in (AVMOp.mul, AVMOp.and_):
c = 0
# 0 != b <-> b
# OR
# 0 < b <-> b
# OR
# 1 <= b <-> b
elif (bool_context or b.ir_type == IRType.bool) and (
(a_const == 0 and op in (AVMOp.neq, AVMOp.lt)) or (a_const == 1 and op == AVMOp.lte)
):
c = b
# a != 0 <-> a
# OR
# a > 0 <-> a
# OR
# a >= 1 <-> a
elif (bool_context or a.ir_type == IRType.bool) and (
(b_const == 0 and op in (AVMOp.neq, AVMOp.gt)) or (b_const == 1 and op == AVMOp.gte)
):
c = a
elif a_const is not None and b_const is not None:
match op:
case AVMOp.add:
c = a_const + b_const
case AVMOp.sub:
c = a_const - b_const
case AVMOp.mul:
c = a_const * b_const
case AVMOp.div_floor if b_const != 0:
c = a_const // b_const
case AVMOp.mod if b_const != 0:
c = a_const % b_const
case AVMOp.lt:
c = 1 if a_const < b_const else 0
case AVMOp.lte:
c = 1 if a_const <= b_const else 0
case AVMOp.gt:
c = 1 if a_const > b_const else 0
case AVMOp.gte:
c = 1 if a_const >= b_const else 0
case AVMOp.eq:
c = 1 if a_const == b_const else 0
case AVMOp.neq:
c = 1 if a_const != b_const else 0
case AVMOp.and_:
c = int(a_const and b_const)
case AVMOp.or_:
c = int(a_const or b_const)
case AVMOp.shl:
c = (a_const << b_const) % (2**64)
case AVMOp.shr:
c = a_const >> b_const
case AVMOp.exp:
if a_const == 0 and b_const == 0:
                        return None  # would fail at runtime, let's hope this is unreachable 😬
c = a_const**b_const
case AVMOp.bitwise_or:
c = a_const | b_const
case AVMOp.bitwise_and:
c = a_const & b_const
case AVMOp.bitwise_xor:
c = a_const ^ b_const
case _:
logger.debug(
f"Don't know how to simplify {a_const} {intrinsic.op.code} {b_const}"
)
# 0 == b <-> !b
elif a_const == 0 and op == AVMOp.eq:
return attrs.evolve(intrinsic, op=AVMOp.not_, args=[b])
# a == 0 <-> !a
elif b_const == 0 and op == AVMOp.eq:
return attrs.evolve(intrinsic, op=AVMOp.not_, args=[a])
if not isinstance(c, int):
return c
if c < 0:
# Value cannot be folded as it would result in a negative uint64
return None
return models.UInt64Constant(value=c, source_location=intrinsic.source_location)
def _try_simplify_bytes_binary_op(
register_assignments: _RegisterAssignments,
intrinsic: models.Intrinsic,
a: models.Value,
b: models.Value,
) -> models.Value | None:
op = intrinsic.op
op_loc = intrinsic.source_location
c: models.Value | int | None = None
if a == b:
match op:
case AVMOp.sub_bytes:
c = 0
case AVMOp.eq_bytes | AVMOp.eq | AVMOp.lte_bytes | AVMOp.gte_bytes:
c = 1
case AVMOp.neq_bytes | AVMOp.neq | AVMOp.lt_bytes | AVMOp.gt_bytes:
c = 0
case AVMOp.div_floor_bytes:
c = 1
case AVMOp.bitwise_xor_bytes:
c = 0
case AVMOp.bitwise_and_bytes | AVMOp.bitwise_or_bytes:
c = a
if c is None:
a_const, a_const_bytes, a_encoding = _get_biguint_constant(register_assignments, a)
b_const, b_const_bytes, b_encoding = _get_biguint_constant(register_assignments, b)
if a_const == 1 and op == AVMOp.mul_bytes:
c = b
elif b_const == 1 and op == AVMOp.mul_bytes:
c = a
elif a_const == 0 and op == AVMOp.add_bytes:
c = b
elif b_const == 0 and op in (AVMOp.add_bytes, AVMOp.sub_bytes):
c = a
elif 0 in (a_const, b_const) and op == AVMOp.mul_bytes:
c = 0
elif a_const is not None and b_const is not None:
if typing.TYPE_CHECKING:
assert a_const_bytes is not None
assert b_const_bytes is not None
assert a_encoding is not None
assert b_encoding is not None
match op:
case AVMOp.add_bytes:
c = a_const + b_const
case AVMOp.sub_bytes:
c = a_const - b_const
case AVMOp.mul_bytes:
c = a_const * b_const
case AVMOp.div_floor_bytes:
c = a_const // b_const
case AVMOp.mod_bytes if b_const != 0:
c = a_const % b_const
case AVMOp.lt_bytes:
c = 1 if a_const < b_const else 0
case AVMOp.lte_bytes:
c = 1 if a_const <= b_const else 0
case AVMOp.gt_bytes:
c = 1 if a_const > b_const else 0
case AVMOp.gte_bytes:
c = 1 if a_const >= b_const else 0
case AVMOp.eq_bytes:
c = 1 if a_const == b_const else 0
case AVMOp.neq_bytes:
c = 1 if a_const != b_const else 0
case AVMOp.eq:
c = 1 if a_const_bytes == b_const_bytes else 0
case AVMOp.neq:
c = 1 if a_const_bytes != b_const_bytes else 0
case AVMOp.bitwise_or_bytes:
return models.BytesConstant(
value=_byte_wise(operator.or_, a_const_bytes, b_const_bytes),
encoding=_choose_encoding(a_encoding, b_encoding),
source_location=op_loc,
)
case AVMOp.bitwise_and_bytes:
return models.BytesConstant(
value=_byte_wise(operator.and_, a_const_bytes, b_const_bytes),
encoding=_choose_encoding(a_encoding, b_encoding),
source_location=op_loc,
)
case AVMOp.bitwise_xor_bytes:
return models.BytesConstant(
value=_byte_wise(operator.xor, a_const_bytes, b_const_bytes),
encoding=_choose_encoding(a_encoding, b_encoding),
source_location=op_loc,
)
case _:
logger.debug(
f"Don't know how to simplify {a_const} {intrinsic.op.code} {b_const}"
)
if not isinstance(c, int):
return c
if c < 0:
# don't fold to a negative
return None
# could look at StackType of op_signature.returns, but some are StackType.any
if op in (
AVMOp.eq_bytes,
AVMOp.eq,
AVMOp.neq_bytes,
AVMOp.neq,
AVMOp.lt_bytes,
AVMOp.lte_bytes,
AVMOp.gt_bytes,
AVMOp.gte_bytes,
):
return models.UInt64Constant(value=c, source_location=intrinsic.source_location)
else:
return models.BigUIntConstant(value=c, source_location=intrinsic.source_location)
|
algorandfoundation/puya
|
src/puya/ir/optimize/intrinsic_simplification.py
|
Python
|
NOASSERTION
| 43,369 |
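A minimal standalone sketch (not part of the repo) of a few of the uint64 folding rules applied above, assuming the same AVM semantics of wrapping shl and refusing to fold results that would be negative; the name fold_uint64 is illustrative only.
# Illustrative sketch only: mirrors a handful of the uint64 folding cases above.
def fold_uint64(op: str, a: int, b: int) -> int | None:
    """Return the folded constant, or None when folding isn't safe."""
    if op == "+":
        c = a + b
    elif op == "-":
        c = a - b
    elif op == "shl":
        c = (a << b) % (2**64)  # AVM shl wraps modulo 2**64
    elif op == "exp":
        if a == 0 and b == 0:
            return None  # 0**0 fails at runtime, so leave it for the AVM to report
        c = a**b
    else:
        return None
    # a negative result can't be represented as a uint64, so decline to fold
    return c if c >= 0 else None
assert fold_uint64("+", 2, 3) == 5
assert fold_uint64("-", 3, 5) is None  # would be negative: not folded
assert fold_uint64("shl", 1, 64) == 0  # wraps to zero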
import typing
from collections import defaultdict
import attrs
from puya.context import CompileContext
from puya.ir import models
from puya.ir.avm_ops import AVMOp
_ARRAY_FIELDS: typing.Final = frozenset(
(
"ApplicationArgs",
"Accounts",
"Assets",
"Applications",
"ApprovalProgramPages",
"ClearStateProgramPages",
)
)
_ZERO_DEFAULTS: typing.Final = frozenset(
(
"ExtraProgramPages",
"GlobalNumUint",
"GlobalNumByteSlice",
"LocalNumUint",
"LocalNumByteSlice",
)
)
def elide_itxn_field_calls(_context: CompileContext, subroutine: models.Subroutine) -> bool:
modified = False
for block in subroutine.body:
if _elide_within_block(block):
modified = True
return modified
def _elide_within_block(block: models.BasicBlock) -> bool:
groups = list[_ITxnGroup]()
current_group: _ITxnGroup | None = None
for op_idx, op in enumerate(block.ops):
if isinstance(op, models.Intrinsic):
match op.op:
case AVMOp.itxn_begin | AVMOp.itxn_next:
groups.append(current_group := _ITxnGroup(has_start=True))
case AVMOp.itxn_submit:
current_group = None
case AVMOp.itxn_field:
(field_im,) = op.immediates
assert isinstance(field_im, str)
if field_im not in _ARRAY_FIELDS:
if current_group is None:
groups.append(current_group := _ITxnGroup(has_start=False))
current_group.sets[field_im].append(op_idx)
if not groups:
return False
remove_indexes = set[int]()
for group in groups:
for field_im, indexes in group.sets.items():
final_idx = indexes.pop()
remove_indexes.update(indexes)
if group.has_start and field_im in _ZERO_DEFAULTS:
match block.ops[final_idx]:
case models.Intrinsic(args=[models.UInt64Constant(value=0)]):
remove_indexes.add(final_idx)
if not remove_indexes:
return False
block.ops = [op for idx, op in enumerate(block.ops) if idx not in remove_indexes]
return True
@attrs.define
class _ITxnGroup:
has_start: bool
sets: defaultdict[str, list[int]] = attrs.field(factory=lambda: defaultdict(list))
|
algorandfoundation/puya
|
src/puya/ir/optimize/itxn_field_elision.py
|
Python
|
NOASSERTION
| 2,440 |
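A simplified standalone illustration (not repo code) of the elision rule above: within one inner-transaction group, only the last itxn_field write per non-array field takes effect, so earlier writes can be dropped.
# Illustrative sketch: keep only the final write per field within a single group.
def last_writes(sets: list[tuple[str, int]]) -> list[tuple[str, int]]:
    final_index: dict[str, int] = {}
    for idx, (field, _value) in enumerate(sets):
        final_index[field] = idx  # a later write shadows any earlier one
    keep = set(final_index.values())
    return [pair for idx, pair in enumerate(sets) if idx in keep]
assert last_writes([("Amount", 1), ("Fee", 0), ("Amount", 2)]) == [("Fee", 0), ("Amount", 2)]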
from collections.abc import Callable, Collection, Iterable
import attrs
from puya import log
from puya.context import ArtifactCompileContext
from puya.ir import models
from puya.ir.optimize.assignments import copy_propagation
from puya.ir.optimize.collapse_blocks import remove_empty_blocks, remove_linear_jump
from puya.ir.optimize.compiled_reference import replace_compiled_references
from puya.ir.optimize.constant_propagation import constant_replacer
from puya.ir.optimize.context import IROptimizationContext
from puya.ir.optimize.control_op_simplification import simplify_control_ops
from puya.ir.optimize.dead_code_elimination import (
remove_unreachable_blocks,
remove_unused_subroutines,
remove_unused_variables,
)
from puya.ir.optimize.inlining import analyse_subroutines_for_inlining, perform_subroutine_inlining
from puya.ir.optimize.inner_txn import inner_txn_field_replacer
from puya.ir.optimize.intrinsic_simplification import intrinsic_simplifier
from puya.ir.optimize.itxn_field_elision import elide_itxn_field_calls
from puya.ir.optimize.repeated_code_elimination import repeated_expression_elimination
from puya.ir.to_text_visitor import render_program
from puya.utils import attrs_extend
MAX_PASSES = 100
SubroutineOptimizerCallable = Callable[[IROptimizationContext, models.Subroutine], bool]
logger = log.get_logger(__name__)
@attrs.define(kw_only=True)
class SubroutineOptimization:
id: str
desc: str
_optimize: SubroutineOptimizerCallable
loop: bool
@classmethod
def from_function(
cls, func: SubroutineOptimizerCallable, *, loop: bool = False
) -> "SubroutineOptimization":
func_name = func.__name__
func_desc = func_name.replace("_", " ").title().strip()
return SubroutineOptimization(id=func_name, desc=func_desc, optimize=func, loop=loop)
def optimize(self, context: IROptimizationContext, ir: models.Subroutine) -> bool:
did_modify = self._optimize(context, ir)
if did_modify:
while self.loop and self._optimize(context, ir):
pass
return did_modify
def get_subroutine_optimizations(optimization_level: int) -> Iterable[SubroutineOptimization]:
if optimization_level:
return [
SubroutineOptimization.from_function(perform_subroutine_inlining),
SubroutineOptimization.from_function(_split_parallel_copies),
SubroutineOptimization.from_function(constant_replacer),
SubroutineOptimization.from_function(copy_propagation),
SubroutineOptimization.from_function(intrinsic_simplifier),
SubroutineOptimization.from_function(elide_itxn_field_calls),
SubroutineOptimization.from_function(remove_unused_variables),
SubroutineOptimization.from_function(inner_txn_field_replacer),
SubroutineOptimization.from_function(replace_compiled_references),
SubroutineOptimization.from_function(simplify_control_ops, loop=True),
SubroutineOptimization.from_function(remove_linear_jump),
SubroutineOptimization.from_function(remove_empty_blocks),
SubroutineOptimization.from_function(remove_unreachable_blocks),
SubroutineOptimization.from_function(repeated_expression_elimination),
]
else:
return [
SubroutineOptimization.from_function(perform_subroutine_inlining),
SubroutineOptimization.from_function(_split_parallel_copies),
SubroutineOptimization.from_function(constant_replacer),
SubroutineOptimization.from_function(remove_unused_variables),
SubroutineOptimization.from_function(inner_txn_field_replacer),
SubroutineOptimization.from_function(replace_compiled_references),
]
def _split_parallel_copies(_ctx: ArtifactCompileContext, sub: models.Subroutine) -> bool:
# not an optimisation, but simplifies other optimisation code
any_modified = False
for block in sub.body:
ops = list[models.Op]()
modified = False
for op in block.ops.copy():
if isinstance(op, models.Assignment) and isinstance(op.source, models.ValueTuple):
for dst, src in zip(op.targets, op.source.values, strict=True):
modified = True
ops.append(
models.Assignment(
targets=[dst],
source=src,
source_location=op.source_location,
)
)
else:
ops.append(op)
if modified:
any_modified = True
block.ops = ops
return any_modified
def optimize_program_ir(
context: ArtifactCompileContext,
program: models.Program,
*,
routable_method_ids: Collection[str] | None,
) -> None:
pipeline = get_subroutine_optimizations(context.options.optimization_level)
opt_context = attrs_extend(IROptimizationContext, context, expand_all_bytes=False)
for pass_num in range(1, MAX_PASSES + 1):
program_modified = False
logger.debug(f"Begin optimization pass {pass_num}/{MAX_PASSES}")
analyse_subroutines_for_inlining(opt_context, program, routable_method_ids)
for subroutine in program.all_subroutines:
logger.debug(f"Optimizing subroutine {subroutine.id}")
for optimizer in pipeline:
logger.debug(f"Optimizer: {optimizer.desc}")
if optimizer.optimize(opt_context, subroutine):
program_modified = True
subroutine.validate_with_ssa()
if remove_unused_subroutines(program):
program_modified = True
if not program_modified:
logger.debug(f"No optimizations performed in pass {pass_num}, ending loop")
break
if context.options.output_optimization_ir:
render_program(context, program, qualifier="ssa.opt")
|
algorandfoundation/puya
|
src/puya/ir/optimize/main.py
|
Python
|
NOASSERTION
| 6,012 |
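optimize_program_ir above loops the whole pass pipeline until a complete pass makes no change (or MAX_PASSES is reached). A hedged, self-contained sketch of that fixed-point pattern, with a toy pass standing in for the real optimizers:
from collections.abc import Callable, Sequence
# Illustrative sketch: each pass mutates `state` in place and reports whether it changed it.
def run_to_fixed_point(
    passes: Sequence[Callable[[list[int]], bool]],
    state: list[int],
    max_passes: int = 100,
) -> int:
    for pass_num in range(1, max_passes + 1):
        modified = any([p(state) for p in passes])  # list comp: run every pass each round
        if not modified:
            return pass_num  # a whole pass made no change: fixed point reached
    return max_passes
def drop_zeros(state: list[int]) -> bool:
    before = len(state)
    state[:] = [x for x in state if x != 0]
    return len(state) != before
assert run_to_fixed_point([drop_zeros], [0, 1, 0, 2]) == 2  # second pass confirms no change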
import functools
import operator
import typing
from collections.abc import Sequence
import attrs
from puya import log
from puya.context import CompileContext
from puya.ir import models
from puya.ir.avm_ops import AVMOp
from puya.ir.optimize.assignments import copy_propagation
from puya.ir.optimize.dead_code_elimination import PURE_AVM_OPS
from puya.ir.visitor import NoOpIRVisitor
logger = log.get_logger(__name__)
@attrs.frozen
class IntrinsicData:
op: AVMOp
immediates: tuple[str | int, ...]
args: tuple[models.Value, ...]
@classmethod
def from_op(cls, op: models.Intrinsic) -> typing.Self:
return cls(
op=op.op,
immediates=tuple(op.immediates),
args=tuple(op.args),
)
def repeated_expression_elimination(
context: CompileContext, subroutine: models.Subroutine
) -> bool:
any_modified = False
dom = compute_dominators(subroutine)
modified = True
while modified:
modified = False
block_asserted = dict[models.BasicBlock, set[models.Value]]()
block_const_intrinsics = dict[
models.BasicBlock, dict[IntrinsicData, Sequence[models.Register]]
]()
for block in subroutine.body:
visitor = RCEVisitor(block)
for op in block.ops.copy():
modified = bool(op.accept(visitor)) or modified
block_asserted[block] = visitor.asserted
block_const_intrinsics[block] = visitor.const_intrinsics
for block in subroutine.body:
dominators = dom[block]
if dominators:
visitor = RCEVisitor(
block,
# if there is a single dominator then reduce will return the original
# collection of that dominator, so copy to ensure original collections are
# not modified
const_intrinsics=functools.reduce(
operator.or_, (block_const_intrinsics[b] for b in dominators)
).copy(),
asserted=functools.reduce(
operator.or_, (block_asserted[b] for b in dominators)
).copy(),
)
for op in block.ops.copy():
modified = bool(op.accept(visitor)) or modified
if modified:
any_modified = True
copy_propagation(context, subroutine)
return any_modified
def compute_dominators(
subroutine: models.Subroutine,
) -> dict[models.BasicBlock, list[models.BasicBlock]]:
all_blocks = set(subroutine.body)
dom = {b: all_blocks if b.predecessors else {b} for b in subroutine.body}
changes = True
while changes:
changes = False
for block in reversed(subroutine.body):
if block.predecessors:
pred_dom = functools.reduce(set.intersection, (dom[p] for p in block.predecessors))
new = pred_dom | {block}
if new != dom[block]:
dom[block] = new
changes = True
return {
b: sorted(dom_set - {b}, key=lambda a: typing.cast(int, a.id))
for b, dom_set in dom.items()
}
@attrs.define
class RCEVisitor(NoOpIRVisitor[bool]):
block: models.BasicBlock
const_intrinsics: dict[IntrinsicData, Sequence[models.Register]] = attrs.field(factory=dict)
asserted: set[models.Value] = attrs.field(factory=set)
_assignment: models.Assignment | None = None
def visit_assignment(self, ass: models.Assignment) -> bool | None:
self._assignment = ass
remove = ass.source.accept(self)
self._assignment = None
return remove
def visit_intrinsic_op(self, intrinsic: models.Intrinsic) -> bool:
modified = False
if (ass := self._assignment) is not None:
# only consider ops with stack args because they're much more likely to
# produce extra stack manipulations
if intrinsic.args and intrinsic.op.code in PURE_AVM_OPS:
key = IntrinsicData.from_op(intrinsic)
try:
existing = self.const_intrinsics[key]
except KeyError:
self.const_intrinsics[key] = ass.targets
else:
logger.debug(
f"Replacing redundant declaration {ass}"
f" with copy of existing registers {existing}"
)
modified = True
if len(existing) == 1:
ass.source = existing[0]
else:
current_idx = self.block.ops.index(ass)
self.block.ops[current_idx : current_idx + 1] = [
models.Assignment(
targets=[dst], source=src, source_location=ass.source_location
)
for dst, src in zip(ass.targets, existing, strict=True)
]
elif intrinsic.op.code == "assert":
(assert_arg,) = intrinsic.args
if assert_arg in self.asserted:
logger.debug(f"Removing redundant assert of {assert_arg}")
modified = True
self.block.ops.remove(intrinsic)
else:
self.asserted.add(assert_arg)
return modified
|
algorandfoundation/puya
|
src/puya/ir/optimize/repeated_code_elimination.py
|
Python
|
NOASSERTION
| 5,447 |
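compute_dominators above uses the classic iterative dataflow formulation. A standalone sketch of the same computation on a toy diamond-shaped CFG (integer block ids and predecessor lists are illustrative assumptions):
import functools
# Illustrative sketch: blocks are ints, preds maps each block to its predecessors.
def dominators(preds: dict[int, list[int]]) -> dict[int, set[int]]:
    all_blocks = set(preds)
    dom = {b: (all_blocks if preds[b] else {b}) for b in preds}
    changed = True
    while changed:
        changed = False
        for b, ps in preds.items():
            if not ps:
                continue  # entry block only dominates itself
            new = functools.reduce(set.intersection, (dom[p] for p in ps)) | {b}
            if new != dom[b]:
                dom[b] = new
                changed = True
    return dom
# diamond: 0 -> 1 -> 3 and 0 -> 2 -> 3; block 3 is dominated only by 0 and itself
assert dominators({0: [], 1: [0], 2: [0], 3: [1, 2]})[3] == {0, 3}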
from collections.abc import Set
import attrs
from puya.ir import models
from puya.ir.visitor import IRTraverser
@attrs.frozen
class RegisterReadCollector(IRTraverser):
_used_registers: list[models.Register] = attrs.field(factory=list, init=False)
@property
def used_registers(self) -> Set[models.Register]:
return dict.fromkeys(self._used_registers).keys()
def visit_register(self, reg: models.Register) -> None:
self._used_registers.append(reg)
def visit_assignment(self, ass: models.Assignment) -> None:
ass.source.accept(self)
def visit_phi(self, phi: models.Phi) -> None:
for arg in phi.args:
self.visit_phi_argument(arg)
|
algorandfoundation/puya
|
src/puya/ir/register_read_collector.py
|
Python
|
NOASSERTION
| 702 |
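used_registers above relies on dict.fromkeys to de-duplicate the collected reads while preserving first-seen order; a quick standalone illustration of that idiom:
# Illustrative only: ordered de-duplication via dict key insertion order.
reads = ["a", "b", "a", "c", "b"]
assert list(dict.fromkeys(reads)) == ["a", "b", "c"]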
from collections.abc import Sequence
import attrs
from puya import log
from puya.errors import InternalError
from puya.ir import models as ir
from puya.ir.types_ import IRType
from puya.ir.visitor_mutator import IRMutator
from puya.parse import SourceLocation
logger = log.get_logger(__name__)
class BraunSSA:
"""
    Constructs CFG+SSA IR directly from an AST-like structure, without having to go
through an intermediate non-SSA IR and then compute dominance frontiers etc.
The primary reference is https://pp.ipd.kit.edu/uploads/publikationen/braun13cc.pdf,
but also of interest is https://pp.ipd.kit.edu/uploads/publikationen/buchwald16cc.pdf,
in which the algorithm is converted into a functional programming form,
and formally verified.
"""
def __init__(
self,
all_blocks: Sequence[ir.BasicBlock],
live_variables: Sequence[ir.Register],
active_block: ir.BasicBlock,
) -> None:
self._all_blocks = all_blocks # note: this is a shared reference with BlocksBuilder
self._sealed_blocks = set[ir.BasicBlock]()
self._current_def = dict[str, dict[ir.BasicBlock, ir.Register]]()
self._incomplete_phis = dict[ir.BasicBlock, list[ir.Phi]]()
self._variable_versions = dict[str, int]()
        # initialize any live variables at the start of the subroutine, i.e. parameters
for parameter in live_variables:
self.write_variable(parameter.name, active_block, parameter)
self._variable_versions[parameter.name] = 1
def has_version(self, variable: str) -> bool:
return variable in self._variable_versions
def write_variable(self, variable: str, block: ir.BasicBlock, value: ir.Register) -> None:
self._current_def.setdefault(variable, {})[block] = value
def has_write(self, variable: str, block: ir.BasicBlock) -> bool:
try:
self._current_def[variable][block]
except KeyError:
return False
else:
return True
def read_variable(self, variable: str, ir_type: IRType, block: ir.BasicBlock) -> ir.Register:
try:
result = self._current_def[variable][block]
except KeyError:
result = self._read_variable_recursive(variable, ir_type, block)
return result
def _read_variable_recursive(
self, variable: str, ir_type: IRType, block: ir.BasicBlock
) -> ir.Register:
value: ir.Register
if not self.is_sealed(block):
logger.debug(
f"Looking for '{variable}' in an unsealed block creating an incomplete "
f"Phi: {block}"
)
# incomplete CFG
phi = self._new_phi(block, variable, ir_type)
self._incomplete_phis.setdefault(block, []).append(phi)
value = phi.register
elif len(block.predecessors) == 1:
value = self.read_variable(variable, ir_type, block.predecessors[0])
else:
# break any potential cycles with empty Phi
phi = self._new_phi(block, variable, ir_type)
self.write_variable(variable, block, phi.register)
value = self._add_phi_operands(phi, block)
self.write_variable(variable, block, value)
return value
def new_register(
self, name: str, ir_type: IRType, location: SourceLocation | None
) -> ir.Register:
version = self._variable_versions.get(name, 0)
self._variable_versions[name] = version + 1
return ir.Register(source_location=location, name=name, version=version, ir_type=ir_type)
def is_sealed(self, block: ir.BasicBlock) -> bool:
return block in self._sealed_blocks
def verify_complete(self) -> None:
unsealed = [b for b in self._all_blocks if not self.is_sealed(b)]
if unsealed:
raise InternalError(
f"{len(unsealed)} block/s were not sealed: " + ", ".join(map(str, unsealed))
)
incomplete_phis = [p for block_phis in self._incomplete_phis.values() for p in block_phis]
if incomplete_phis:
# if we get here, there is a bug in this algorithm itself
raise InternalError(
f"{len(incomplete_phis)} phi node/s are incomplete: "
+ ", ".join(map(str, incomplete_phis))
)
def _new_phi(self, block: ir.BasicBlock, variable: str, ir_type: IRType) -> ir.Phi:
reg = self.new_register(variable, ir_type, location=None)
phi = ir.Phi(register=reg)
block.phis.append(phi)
logger.debug(
f"Created Phi assignment: {phi} while trying to resolve '{variable}' in {block}"
)
return phi
def _add_phi_operands(self, phi: ir.Phi, block: ir.BasicBlock) -> ir.Register:
variable = phi.register.name
for block_pred in block.predecessors:
pred_variable = self.read_variable(variable, phi.ir_type, block_pred)
phi.args.append(
ir.PhiArgument(
value=pred_variable,
through=block_pred,
)
)
logger.debug(f"Added {pred_variable} to Phi node: {phi} in {block_pred}")
attrs.validate(phi)
trivial_replacements = TrivialPhiRemover.try_remove(phi, self._all_blocks)
if not trivial_replacements:
return phi.register
_, result = trivial_replacements[0]
variable_def = self._current_def[variable]
for removed_phi, replacement_memory in trivial_replacements:
for unsealed_block, incomplete_phis in self._incomplete_phis.items():
if removed_phi in incomplete_phis:
raise InternalError(
f"removed an incomplete phi {removed_phi} from block {unsealed_block}!!"
)
if replacement_memory.name != variable:
raise InternalError(
"Tangled phi web created during SSA construction",
replacement_memory.source_location,
)
if removed_phi.register == result:
result = replacement_memory
# ensure replacement is also recorded in variable cache
replacements = 0
for block_def, arg in variable_def.items():
if arg == removed_phi.register:
variable_def[block_def] = replacement_memory
replacements += 1
logger.debug(
f"Replaced trivial Phi node: {removed_phi} ({removed_phi.register})"
f" with {replacement_memory} in current definition for {replacements} blocks"
)
return result
def seal_block(self, block: ir.BasicBlock) -> None:
logger.debug(f"Sealing {block}")
if self.is_sealed(block):
raise InternalError(f"Cannot seal a block that was already sealed: {block}")
for predecessor in block.predecessors:
if not predecessor.terminated:
raise InternalError(f"All predecessors must be terminated before sealing: {block}")
phi_list = self._incomplete_phis.pop(block, [])
for phi in phi_list:
self._add_phi_operands(phi, block)
self._sealed_blocks.add(block)
@attrs.define(kw_only=True)
class TrivialPhiRemover(IRMutator):
_find: ir.Phi
_replacement: ir.Register
collected: list[ir.Phi] = attrs.field(factory=list)
replaced: int = 0
@classmethod
def try_remove(
cls, phi: ir.Phi, blocks: Sequence[ir.BasicBlock]
) -> list[tuple[ir.Phi, ir.Register]]:
try:
replacement, *other = {a.value for a in phi.non_self_args}
except ValueError:
            # per https://pp.ipd.kit.edu/uploads/publikationen/buchwald16cc.pdf, section 4.1
logger.warning(f"Variable {phi.register.name} potentially used before assignment")
return []
result = []
# if other, then phi merges >1 value: not trivial
if not other:
result.append((phi, replacement))
# replace users of phi with replacement
logger.debug(f"Replacing trivial Phi node: {phi} ({phi.register}) with {replacement}")
# this approach is not very efficient, but is simpler than tracking all usages
# and good enough for now
replacer = cls(find=phi, replacement=replacement)
for block in blocks:
replacer.visit_block(block)
# recursively check/replace all phi users, which may now be trivial
for phi_user in replacer.collected:
result.extend(cls.try_remove(phi_user, blocks))
return result
def visit_register(self, reg: ir.Register) -> ir.Register:
if reg != self._find.register:
return reg
self.replaced += 1
return self._replacement
def visit_phi(self, phi: ir.Phi) -> ir.Phi | None:
if phi is self._find:
logger.debug(f"Deleting Phi assignment: {phi}")
return None
prior_replace_count = self.replaced
result = super().visit_phi(phi)
assert result is phi, "phi instance changed"
if self.replaced > prior_replace_count:
self.collected.append(result)
return result
|
algorandfoundation/puya
|
src/puya/ir/ssa.py
|
Python
|
NOASSERTION
| 9,388 |
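TrivialPhiRemover above applies the trivial-phi rule from Braun et al.: a phi whose operands, ignoring self-references, are all the same value can be replaced by that value. A highly simplified standalone sketch, representing phis as plain names and argument lists (an illustrative assumption, not the repo's models):
# Illustrative sketch: args reference either the phi's own name or other values.
def try_remove_trivial(phi_name: str, args: list[str]) -> str | None:
    operands = {a for a in args if a != phi_name}  # ignore self-references
    if len(operands) == 1:
        return operands.pop()  # trivial: every path supplies the same value
    return None  # zero or >1 distinct operands: keep the phi
assert try_remove_trivial("x_2", ["x_1", "x_2", "x_1"]) == "x_1"
assert try_remove_trivial("x_2", ["x_1", "x_3"]) is None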
import contextlib
import typing
from collections.abc import Iterator, Sequence
from puya import log
from puya.context import ArtifactCompileContext
from puya.ir import models
from puya.ir.types_ import IRType
from puya.ir.utils import format_bytes, format_error_comment
from puya.ir.visitor import IRVisitor
from puya.utils import make_path_relative_to_cwd
logger = log.get_logger(__name__)
class ToTextVisitor(IRVisitor[str]):
@typing.override
def visit_assignment(self, op: models.Assignment) -> str:
targets = ", ".join(f"{r.accept(self)}: {r.ir_type.name}" for r in op.targets)
if len(op.targets) > 1:
targets = f"({targets})"
source = op.source.accept(self)
return f"let {targets} = {source}"
@typing.override
def visit_register(self, op: models.Register) -> str:
return op.local_id
@typing.override
def visit_undefined(self, op: models.Undefined) -> str:
return "undefined"
@typing.override
def visit_uint64_constant(self, op: models.UInt64Constant) -> str:
return f"{op.value}u" if not op.teal_alias else op.teal_alias
@typing.override
def visit_biguint_constant(self, op: models.BigUIntConstant) -> str:
return f"{op.value}b"
@typing.override
def visit_bytes_constant(self, op: models.BytesConstant) -> str:
return format_bytes(op.value, op.encoding)
@typing.override
def visit_address_constant(self, op: models.AddressConstant) -> str:
return f"addr {op.value}"
@typing.override
def visit_method_constant(self, op: models.MethodConstant) -> str:
return f'method "{op.value}"'
@typing.override
def visit_itxn_constant(self, op: models.ITxnConstant) -> str:
return f"{op.ir_type.name}({op.value})"
@typing.override
def visit_compiled_contract_reference(self, const: models.CompiledContractReference) -> str:
return (
", ".join(
(
f"compiled_contract({const.artifact!r}",
f"field={const.field.name}",
f"program_page={const.program_page}",
*(
f"{var}={val.accept(self)}"
for var, val in const.template_variables.items()
),
)
)
+ ")"
)
@typing.override
def visit_compiled_logicsig_reference(self, const: models.CompiledLogicSigReference) -> str:
return (
", ".join(
(
f"compiled_logicsig({const.artifact!r}",
*(
f"{var}={val.accept(self)}"
for var, val in const.template_variables.items()
),
)
)
+ ")"
)
@typing.override
def visit_intrinsic_op(self, intrinsic: models.Intrinsic) -> str:
callee = intrinsic.op.code
immediates = list(map(str, intrinsic.immediates))
if immediates:
callee = f"({' '.join((callee, *immediates))})"
args = " ".join(arg.accept(self) for arg in intrinsic.args)
if args:
callee = f"({callee} {args})"
if intrinsic.error_message:
callee += f" // {format_error_comment(intrinsic.op.code, intrinsic.error_message)}"
return callee
@typing.override
def visit_inner_transaction_field(self, field: models.InnerTransactionField) -> str:
group = field.group_index.accept(self)
array_access = f"[{field.array_index.accept(self)}]" if field.array_index else ""
return f"itxn[{group}].{field.field}{array_access}"
@typing.override
def visit_invoke_subroutine(self, op: models.InvokeSubroutine) -> str:
args = ", ".join(a.accept(self) for a in op.args)
return f"{op.target.id}({args})"
@typing.override
def visit_conditional_branch(self, op: models.ConditionalBranch) -> str:
return f"goto {op.condition.accept(self)} ? {op.non_zero} : {op.zero}"
@typing.override
def visit_goto(self, op: models.Goto) -> str:
return f"goto {op.target}"
@typing.override
def visit_goto_nth(self, op: models.GotoNth) -> str:
blocks = ", ".join(map(str, op.blocks))
return f"goto_nth [{blocks}][{op.value.accept(self)}] else goto {op.default}"
@typing.override
def visit_switch(self, op: models.Switch) -> str:
cases = {k.accept(self): str(b) for k, b in op.cases.items()}
cases["*"] = str(op.default)
map_ = ", ".join(f"{k} => {v}" for k, v in cases.items())
return f"switch {op.value.accept(self)} {{{map_}}}"
@typing.override
def visit_subroutine_return(self, op: models.SubroutineReturn) -> str:
results = " ".join(r.accept(self) for r in op.result)
return f"return {results}"
@typing.override
def visit_template_var(self, deploy_var: models.TemplateVar) -> str:
return f"TemplateVar[{deploy_var.ir_type.name}]({deploy_var.name})"
@typing.override
def visit_program_exit(self, op: models.ProgramExit) -> str:
return f"exit {op.result.accept(self)}"
@typing.override
def visit_fail(self, op: models.Fail) -> str:
if op.error_message:
return f"fail // {op.error_message}"
return "fail"
@typing.override
def visit_phi(self, op: models.Phi) -> str:
r = op.register
target = f"{r.accept(self)}: {r.ir_type.name}"
if op.args:
args = ", ".join(a.accept(self) for a in op.args)
source = f"φ({args})"
else:
source = "undefined"
return f"let {target} = {source}"
@typing.override
def visit_phi_argument(self, op: models.PhiArgument) -> str:
return f"{op.value.accept(self)} <- {op.through}"
@typing.override
def visit_value_tuple(self, tup: models.ValueTuple) -> str:
return "(" + ", ".join(val.accept(self) for val in tup.values) + ")"
class TextEmitter:
def __init__(self) -> None:
self.lines = list[str]()
self._indent = 0
def append(self, line: str) -> None:
self.lines.append((self._indent * " ") + line)
@contextlib.contextmanager
def indent(self, spaces: int = 4) -> Iterator[None]:
self._indent += spaces
try:
yield
finally:
self._indent -= spaces
def _render_block_name(block: models.BasicBlock) -> str:
result = f"{block}: // "
if block.comment:
result += f"{block.comment}_"
result += f"L{block.source_location.line}"
return result
def _render_body(emitter: TextEmitter, blocks: Sequence[models.BasicBlock]) -> None:
renderer = ToTextVisitor()
for block in blocks:
assert block.terminated
emitter.append(_render_block_name(block))
with emitter.indent():
for op in block.all_ops:
emitter.append(op.accept(renderer))
def render_program(
context: ArtifactCompileContext, program: models.Program, *, qualifier: str
) -> None:
path = context.build_output_path(program.kind, qualifier, "ir")
if path is None:
return
emitter = TextEmitter()
emitter.append(f"main {program.main.id}:")
with emitter.indent():
_render_body(emitter, program.main.body)
for sub in program.subroutines:
emitter.append("")
args = ", ".join(f"{r.name}: {r.ir_type.name}" for r in sub.parameters)
match sub.returns:
case []:
returns = "void"
case [IRType(name=returns)]:
pass
case _ as ir_types:
returns = f"<{', '.join(t.name for t in ir_types)}>"
emitter.append(f"subroutine {sub.id}({args}) -> {returns}:")
with emitter.indent():
_render_body(emitter, sub.body)
path.write_text("\n".join(emitter.lines), encoding="utf-8")
logger.debug(f"Output IR to {make_path_relative_to_cwd(path)}")
|
algorandfoundation/puya
|
src/puya/ir/to_text_visitor.py
|
Python
|
NOASSERTION
| 8,037 |
import enum
import typing
from collections.abc import Sequence
from puya.avm import AVMType
from puya.awst import (
nodes as awst_nodes,
wtypes,
)
from puya.awst.nodes import BytesEncoding
from puya.errors import CodeError, InternalError
from puya.ir.avm_ops_models import StackType
from puya.parse import SourceLocation
@enum.unique
class AVMBytesEncoding(enum.StrEnum):
unknown = enum.auto()
base16 = enum.auto()
base32 = enum.auto()
base64 = enum.auto()
utf8 = enum.auto()
@enum.unique
class IRType(enum.StrEnum):
bytes = enum.auto()
uint64 = enum.auto()
bool = enum.auto()
biguint = enum.auto()
itxn_group_idx = enum.auto() # the group index of the result
itxn_field_set = enum.auto() # a collection of fields for a pending itxn submit
@property
def avm_type(self) -> typing.Literal[AVMType.uint64, AVMType.bytes]:
maybe_result = self.maybe_avm_type
if not isinstance(maybe_result, AVMType):
raise InternalError(f"{maybe_result} cannot be mapped to AVM stack type")
return maybe_result
@property
def maybe_avm_type(self) -> typing.Literal[AVMType.uint64, AVMType.bytes] | str:
match self:
case IRType.uint64 | IRType.bool:
return AVMType.uint64
case IRType.bytes | IRType.biguint:
return AVMType.bytes
case IRType.itxn_group_idx | IRType.itxn_field_set:
return self.name
case _:
typing.assert_never(self)
def bytes_enc_to_avm_bytes_enc(bytes_encoding: BytesEncoding) -> AVMBytesEncoding:
try:
return AVMBytesEncoding(bytes_encoding.value)
except ValueError as ex:
raise InternalError(f"Unhandled BytesEncoding: {bytes_encoding}") from ex
def wtype_to_ir_type(
expr_or_wtype: wtypes.WType | awst_nodes.Expression,
source_location: SourceLocation | None = None,
) -> IRType:
if isinstance(expr_or_wtype, awst_nodes.Expression):
return wtype_to_ir_type(
expr_or_wtype.wtype, source_location=source_location or expr_or_wtype.source_location
)
else:
wtype = expr_or_wtype
match wtype:
case wtypes.bool_wtype:
return IRType.bool
case wtypes.biguint_wtype:
return IRType.biguint
case wtypes.WInnerTransaction():
return IRType.itxn_group_idx
case wtypes.WInnerTransactionFields():
return IRType.itxn_field_set
case wtypes.void_wtype:
raise InternalError("can't translate void wtype to irtype", source_location)
# case wtypes.state_key:
# return IRType.state_key # TODO
# case wtypes.box_key:
# return IRType.box_key # TODO
match wtype.scalar_type:
case AVMType.uint64:
return IRType.uint64
case AVMType.bytes:
return IRType.bytes
case None:
raise CodeError(
f"unsupported nested/compound wtype encountered: {wtype}", source_location
)
case _:
typing.assert_never(wtype.scalar_type)
def get_wtype_arity(wtype: wtypes.WType) -> int:
"""Returns the number of values this wtype represents on the stack"""
if isinstance(wtype, wtypes.WTuple):
return sum_wtypes_arity(wtype.types)
else:
return 1
def sum_wtypes_arity(types: Sequence[wtypes.WType]) -> int:
return sum(map(get_wtype_arity, types))
def wtype_to_ir_types(
expr_or_wtype: wtypes.WType | awst_nodes.Expression,
source_location: SourceLocation | None = None,
) -> list[IRType]:
if isinstance(expr_or_wtype, awst_nodes.Expression):
wtype = expr_or_wtype.wtype
else:
wtype = expr_or_wtype
if wtype == wtypes.void_wtype:
return []
elif isinstance(wtype, wtypes.WTuple):
return [
ir_type
for wtype in wtype.types
for ir_type in wtype_to_ir_types(wtype, source_location)
]
else:
return [wtype_to_ir_type(wtype, source_location)]
def stack_type_to_avm_type(stack_type: StackType) -> AVMType:
match stack_type:
case StackType.uint64 | StackType.bool | StackType.asset | StackType.application:
return AVMType.uint64
case (
StackType.bytes
| StackType.bigint
| StackType.box_name
| StackType.address
| StackType.state_key
):
return AVMType.bytes
case StackType.any | StackType.address_or_index:
return AVMType.any
def stack_type_to_ir_type(stack_type: StackType) -> IRType | None:
match stack_type:
case StackType.bool:
return IRType.bool
case StackType.bigint:
return IRType.biguint
case StackType.uint64 | StackType.asset | StackType.application:
return IRType.uint64
case StackType.bytes | StackType.box_name | StackType.address | StackType.state_key:
return IRType.bytes
case StackType.any | StackType.address_or_index:
return None
case _:
typing.assert_never(stack_type)
|
algorandfoundation/puya
|
src/puya/ir/types_.py
|
Python
|
NOASSERTION
| 5,198 |
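get_wtype_arity and sum_wtypes_arity above flatten (possibly nested) tuple types into a count of stack slots. A tiny standalone sketch of that flattening, using plain Python tuples in place of WTuple:
# Illustrative sketch: each scalar leaf costs one stack slot, tuples flatten recursively.
def arity(t: object) -> int:
    return sum(arity(item) for item in t) if isinstance(t, tuple) else 1
assert arity("uint64") == 1
assert arity(("uint64", ("bytes", "bool"))) == 3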
import base64
from collections.abc import Sequence
from puya.awst import (
nodes as awst_nodes,
wtypes,
)
from puya.ir.types_ import AVMBytesEncoding
def escape_utf8_string(s: str) -> str:
"""Escape a UTF-8 string for use in TEAL assembly.
Args:
s: A UTF-8 string to escape.
Returns:
An escaped version of the input string. This version will be surrounded in double quotes,
all special characters (such as \\n) will be escaped with additional backslashes, and all
Unicode characters beyond the latin-1 encoding will be encoded in hex escapes (e.g. \\xf0).
"""
# The point of this conversion is to escape all special characters and turn all Unicode
# characters into hex-escaped characters in the input string.
#
# The first step breaks up large Unicode characters into multiple UTF-8 hex characters:
# s_1 = s.encode("utf-8").decode("latin-1")
# e.g. "\n 😀" => "\n ð\x9f\x98\x80"
#
# The next step escapes all special characters:
# s_1.encode("unicode-escape").decode("latin-1")
# e.g. "\n ð\x9f\x98\x80" => "\\n \\xf0\\x9f\\x98\\x80"
#
# If we skipped the first step we would end up with Unicode codepoints instead of hex escaped
# characters, which TEAL assembly cannot process:
# s.encode("unicode-escape").decode("latin-1")
# e.g. "\n 😀" => "\\n \\U0001f600'"
s = s.encode("utf-8").decode("latin-1").encode("unicode-escape").decode("latin-1")
# Escape double quote characters (not covered by unicode-escape) but leave in single quotes
s = s.replace('"', '\\"')
# Surround string in double quotes
return '"' + s + '"'
def format_bytes(b: bytes, encoding: AVMBytesEncoding) -> str:
match encoding:
case AVMBytesEncoding.utf8:
return escape_utf8_string(b.decode())
case AVMBytesEncoding.base32:
return base64.b32encode(b).decode("ascii").rstrip("=")
case AVMBytesEncoding.base64:
return base64.b64encode(b).decode("ascii")
case AVMBytesEncoding.base16 | AVMBytesEncoding.unknown:
return f"0x{b.hex()}"
def format_tuple_index(var_type: wtypes.WTuple, base_name: str, index_or_name: int | str) -> str:
    # If a named tuple is indexed numerically, convert it to the item name
if isinstance(index_or_name, int) and var_type.names is not None:
index_or_name = var_type.names[index_or_name]
return f"{base_name}.{index_or_name}"
def lvalue_items(tup: awst_nodes.TupleExpression) -> Sequence[awst_nodes.Lvalue]:
items = list[awst_nodes.Lvalue]()
for item in tup.items:
assert isinstance(item, awst_nodes.Lvalue)
items.append(item)
return items
def format_error_comment(op: str, error_message: str) -> str:
if op in ("err", "assert"):
return error_message
else:
return f"on error: {error_message}"
|
algorandfoundation/puya
|
src/puya/ir/utils.py
|
Python
|
NOASSERTION
| 2,916 |
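A quick standalone check of the two-step escaping pipeline documented in escape_utf8_string above; the expected literals follow directly from the transformation itself.
# Illustrative check of the escaping steps described in the comments above.
s = "\n 😀"
step1 = s.encode("utf-8").decode("latin-1")  # emoji becomes individual latin-1 chars
step2 = step1.encode("unicode-escape").decode("latin-1")  # escape specials and hex bytes
assert step2 == "\\n \\xf0\\x9f\\x98\\x80"
escaped = '"' + step2.replace('"', '\\"') + '"'
assert escaped == '"\\n \\xf0\\x9f\\x98\\x80"'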
algorandfoundation/puya
|
src/puya/ir/validation/__init__.py
|
Python
|
NOASSERTION
| 0 |
import abc
import typing
from puya.context import CompileContext
from puya.ir.avm_ops_models import RunMode
from puya.ir.models import Contract, LogicSignature, ModuleArtifact, Program
from puya.ir.visitor import IRTraverser
class DestructuredIRValidator(IRTraverser, abc.ABC):
def __init__(
self,
context: CompileContext,
program: Program,
run_mode: typing.Literal[RunMode.app, RunMode.lsig],
):
self.context = context
self.current_run_mode = run_mode
self.active_program = program
@classmethod
def validate(cls, context: CompileContext, artifact: ModuleArtifact) -> None:
match artifact:
case LogicSignature() as l_sig:
cls.validate_logic_sig(context, l_sig)
case Contract() as contract:
cls.validate_contract(context, contract)
case _:
typing.assert_never(artifact)
@classmethod
def validate_logic_sig(cls, context: CompileContext, logic_sig: LogicSignature) -> None:
validator = cls(context, logic_sig.program, RunMode.lsig)
for sub in logic_sig.program.all_subroutines:
validator.visit_all_blocks(sub.body)
@classmethod
def validate_contract(cls, context: CompileContext, contract: Contract) -> None:
for program in contract.all_programs():
validator = cls(context, program, RunMode.app)
for sub in program.all_subroutines:
validator.visit_all_blocks(sub.body)
|
algorandfoundation/puya
|
src/puya/ir/validation/_base.py
|
Python
|
NOASSERTION
| 1,528 |
import typing
from collections.abc import Iterable
from puya import log
from puya.ir.models import (
CompiledContractReference,
CompiledLogicSigReference,
Constant,
TemplateVar,
Value,
)
from puya.ir.validation._base import DestructuredIRValidator
logger = log.get_logger(__name__)
class CompileReferenceValidator(DestructuredIRValidator):
@typing.override
def visit_compiled_contract_reference(self, const: CompiledContractReference) -> None:
_log_non_constant_values(const.template_variables.values())
@typing.override
def visit_compiled_logicsig_reference(self, const: CompiledLogicSigReference) -> None:
_log_non_constant_values(const.template_variables.values())
def _log_non_constant_values(values: Iterable[Value]) -> None:
for value in values:
if isinstance(value, Constant):
continue
if isinstance(value, TemplateVar):
logger.error(
"nested template variables are not supported",
location=value.source_location,
)
else:
logger.error(
"non-constant template value",
location=value.source_location,
)
|
algorandfoundation/puya
|
src/puya/ir/validation/compile_reference_validator.py
|
Python
|
NOASSERTION
| 1,219 |
import typing
from puya import log
from puya.ir.models import InnerTransactionField, ITxnConstant
from puya.ir.validation._base import DestructuredIRValidator
logger = log.get_logger(__name__)
class ITxnResultFieldValidator(DestructuredIRValidator):
@typing.override
def visit_itxn_constant(self, const: ITxnConstant) -> None:
logger.info(
"Potential cause of field access with non-constant group index",
location=const.source_location,
)
@typing.override
def visit_inner_transaction_field(self, field: InnerTransactionField) -> None:
logger.error(
"Inner transaction field access with non constant group index,"
" to resolve move field access to same code path where the inner transaction is"
" submitted",
location=field.source_location,
)
|
algorandfoundation/puya
|
src/puya/ir/validation/itxn_result_field_validator.py
|
Python
|
NOASSERTION
| 867 |
import attrs
from puya.context import CompileContext
from puya.ir.models import ModuleArtifact
from puya.ir.validation.compile_reference_validator import CompileReferenceValidator
from puya.ir.validation.itxn_result_field_validator import ITxnResultFieldValidator
from puya.ir.validation.min_avm_version_validator import MinAvmVersionValidator
from puya.ir.validation.op_run_mode_validator import OpRunModeValidator
def validate_module_artifact(context: CompileContext, artifact: ModuleArtifact) -> None:
attrs.validate(artifact)
for validator_cls in (
OpRunModeValidator,
MinAvmVersionValidator,
ITxnResultFieldValidator,
CompileReferenceValidator,
):
validator_cls.validate(context, artifact)
|
algorandfoundation/puya
|
src/puya/ir/validation/main.py
|
Python
|
NOASSERTION
| 750 |
import typing
from puya import log
from puya.ir.models import Intrinsic
from puya.ir.validation._base import DestructuredIRValidator
logger = log.get_logger(__name__)
class MinAvmVersionValidator(DestructuredIRValidator):
@typing.override
def visit_intrinsic_op(self, intrinsic: Intrinsic) -> None:
program_avm_version = self.active_program.avm_version
op_avm_version = intrinsic.op_variant.min_avm_version
if op_avm_version > program_avm_version:
op_desc = intrinsic.op.value
# if variant min version differs from op min version, then include variant enum
if op_avm_version != intrinsic.op.min_avm_version:
op_desc += f" {intrinsic.op_variant.enum}"
logger.error(
f"Opcode {op_desc!r} requires a min AVM version of "
f"{op_avm_version} but the target AVM version is"
f" {program_avm_version}",
location=intrinsic.source_location,
)
|
algorandfoundation/puya
|
src/puya/ir/validation/min_avm_version_validator.py
|
Python
|
NOASSERTION
| 1,009 |
import typing
from puya import log
from puya.ir.avm_ops_models import RunMode
from puya.ir.models import Intrinsic
from puya.ir.validation._base import DestructuredIRValidator
logger = log.get_logger(__name__)
class OpRunModeValidator(DestructuredIRValidator):
@typing.override
def visit_intrinsic_op(self, intrinsic: Intrinsic) -> None:
match intrinsic.op_variant.supported_modes:
case RunMode.any:
pass
case RunMode.app:
if self.current_run_mode != RunMode.app:
logger.warning(
f"The operation {intrinsic} is only allowed in smart contracts",
location=intrinsic.source_location,
)
case RunMode.lsig:
if self.current_run_mode != RunMode.lsig:
logger.warning(
f"The operation {intrinsic} is only allowed in logic signatures",
location=intrinsic.source_location,
)
case _:
typing.assert_never(intrinsic.op_variant.supported_modes)
|
algorandfoundation/puya
|
src/puya/ir/validation/op_run_mode_validator.py
|
Python
|
NOASSERTION
| 1,138 |
# ruff: noqa: ARG002
from __future__ import annotations # needed to break import cycle
import typing as t
from abc import ABC, abstractmethod
if t.TYPE_CHECKING:
from collections.abc import Iterable
import puya.ir.models
class IRVisitor[T](ABC):
@abstractmethod
def visit_assignment(self, ass: puya.ir.models.Assignment) -> T: ...
@abstractmethod
def visit_register(self, reg: puya.ir.models.Register) -> T: ...
@abstractmethod
def visit_undefined(self, val: puya.ir.models.Undefined) -> T: ...
@abstractmethod
def visit_uint64_constant(self, const: puya.ir.models.UInt64Constant) -> T: ...
@abstractmethod
def visit_biguint_constant(self, const: puya.ir.models.BigUIntConstant) -> T: ...
@abstractmethod
def visit_bytes_constant(self, const: puya.ir.models.BytesConstant) -> T: ...
@abstractmethod
def visit_compiled_contract_reference(
self, const: puya.ir.models.CompiledContractReference
) -> T: ...
@abstractmethod
def visit_compiled_logicsig_reference(
self, const: puya.ir.models.CompiledLogicSigReference
) -> T: ...
@abstractmethod
def visit_address_constant(self, const: puya.ir.models.AddressConstant) -> T: ...
@abstractmethod
def visit_method_constant(self, const: puya.ir.models.MethodConstant) -> T: ...
@abstractmethod
def visit_itxn_constant(self, const: puya.ir.models.ITxnConstant) -> T: ...
@abstractmethod
def visit_phi(self, phi: puya.ir.models.Phi) -> T: ...
@abstractmethod
def visit_phi_argument(self, arg: puya.ir.models.PhiArgument) -> T: ...
@abstractmethod
def visit_intrinsic_op(self, intrinsic: puya.ir.models.Intrinsic) -> T: ...
@abstractmethod
def visit_inner_transaction_field(
self, intrinsic: puya.ir.models.InnerTransactionField
) -> T: ...
@abstractmethod
def visit_invoke_subroutine(self, callsub: puya.ir.models.InvokeSubroutine) -> T: ...
@abstractmethod
def visit_value_tuple(self, tup: puya.ir.models.ValueTuple) -> T: ...
@abstractmethod
def visit_conditional_branch(self, branch: puya.ir.models.ConditionalBranch) -> T: ...
@abstractmethod
def visit_goto(self, goto: puya.ir.models.Goto) -> T: ...
@abstractmethod
def visit_goto_nth(self, goto_nth: puya.ir.models.GotoNth) -> T: ...
@abstractmethod
def visit_switch(self, switch: puya.ir.models.Switch) -> T: ...
@abstractmethod
def visit_subroutine_return(self, retsub: puya.ir.models.SubroutineReturn) -> T: ...
@abstractmethod
def visit_program_exit(self, exit_: puya.ir.models.ProgramExit) -> T: ...
@abstractmethod
def visit_fail(self, fail: puya.ir.models.Fail) -> T: ...
@abstractmethod
def visit_template_var(self, deploy_var: puya.ir.models.TemplateVar) -> T: ...
class IRTraverser(IRVisitor[None]):
def visit_all_blocks(self, blocks: Iterable[puya.ir.models.BasicBlock]) -> None:
for block in blocks:
self.visit_block(block)
def visit_block(self, block: puya.ir.models.BasicBlock) -> None:
for op in list(block.all_ops): # make a copy in case visitors need to modify ops
op.accept(self)
def visit_assignment(self, ass: puya.ir.models.Assignment) -> None:
for target in ass.targets:
target.accept(self)
ass.source.accept(self)
def visit_register(self, reg: puya.ir.models.Register) -> None:
pass
def visit_undefined(self, val: puya.ir.models.Undefined) -> None:
pass
def visit_uint64_constant(self, const: puya.ir.models.UInt64Constant) -> None:
pass
def visit_biguint_constant(self, const: puya.ir.models.BigUIntConstant) -> None:
pass
def visit_bytes_constant(self, const: puya.ir.models.BytesConstant) -> None:
pass
def visit_address_constant(self, const: puya.ir.models.AddressConstant) -> None:
pass
def visit_template_var(self, deploy_var: puya.ir.models.TemplateVar) -> None:
pass
def visit_method_constant(self, const: puya.ir.models.MethodConstant) -> None:
pass
def visit_compiled_contract_reference(
self, const: puya.ir.models.CompiledContractReference
) -> None:
for var in const.template_variables.values():
var.accept(self)
def visit_compiled_logicsig_reference(
self, const: puya.ir.models.CompiledLogicSigReference
) -> None:
for var in const.template_variables.values():
var.accept(self)
def visit_itxn_constant(self, const: puya.ir.models.ITxnConstant) -> None:
pass
def visit_phi(self, phi: puya.ir.models.Phi) -> None:
phi.register.accept(self)
for arg in phi.args:
arg.accept(self)
def visit_phi_argument(self, arg: puya.ir.models.PhiArgument) -> None:
arg.value.accept(self)
def visit_intrinsic_op(self, intrinsic: puya.ir.models.Intrinsic) -> None:
for arg in intrinsic.args:
arg.accept(self)
def visit_inner_transaction_field(self, field: puya.ir.models.InnerTransactionField) -> None:
field.group_index.accept(self)
field.is_last_in_group.accept(self)
if field.array_index:
field.array_index.accept(self)
def visit_invoke_subroutine(self, callsub: puya.ir.models.InvokeSubroutine) -> None:
for arg in callsub.args:
arg.accept(self)
def visit_conditional_branch(self, branch: puya.ir.models.ConditionalBranch) -> None:
branch.condition.accept(self)
def visit_goto(self, goto: puya.ir.models.Goto) -> None:
pass
def visit_goto_nth(self, goto_nth: puya.ir.models.GotoNth) -> None:
goto_nth.value.accept(self)
def visit_switch(self, switch: puya.ir.models.Switch) -> None:
switch.value.accept(self)
for case in switch.cases:
case.accept(self)
def visit_subroutine_return(self, retsub: puya.ir.models.SubroutineReturn) -> None:
for r in retsub.result:
r.accept(self)
def visit_program_exit(self, exit_: puya.ir.models.ProgramExit) -> None:
exit_.result.accept(self)
def visit_fail(self, fail: puya.ir.models.Fail) -> None:
pass
def visit_value_tuple(self, tup: puya.ir.models.ValueTuple) -> None:
for v in tup.values:
v.accept(self)
class NoOpIRVisitor[T](IRVisitor[T | None]):
def visit_assignment(self, ass: puya.ir.models.Assignment) -> T | None:
return None
def visit_register(self, reg: puya.ir.models.Register) -> T | None:
return None
def visit_undefined(self, val: puya.ir.models.Undefined) -> T | None:
return None
def visit_uint64_constant(self, const: puya.ir.models.UInt64Constant) -> T | None:
return None
def visit_biguint_constant(self, const: puya.ir.models.BigUIntConstant) -> T | None:
return None
def visit_bytes_constant(self, const: puya.ir.models.BytesConstant) -> T | None:
return None
def visit_address_constant(self, const: puya.ir.models.AddressConstant) -> T | None:
return None
def visit_method_constant(self, const: puya.ir.models.MethodConstant) -> T | None:
return None
def visit_itxn_constant(self, const: puya.ir.models.ITxnConstant) -> T | None:
return None
def visit_compiled_contract_reference(
self, const: puya.ir.models.CompiledContractReference
) -> T | None:
return None
def visit_compiled_logicsig_reference(
self, const: puya.ir.models.CompiledLogicSigReference
) -> T | None:
return None
def visit_phi(self, phi: puya.ir.models.Phi) -> T | None:
return None
def visit_template_var(self, deploy_var: puya.ir.models.TemplateVar) -> T | None:
return None
def visit_phi_argument(self, arg: puya.ir.models.PhiArgument) -> T | None:
return None
def visit_intrinsic_op(self, intrinsic: puya.ir.models.Intrinsic) -> T | None:
return None
def visit_inner_transaction_field(
self, field: puya.ir.models.InnerTransactionField
) -> T | None:
return None
def visit_invoke_subroutine(self, callsub: puya.ir.models.InvokeSubroutine) -> T | None:
return None
def visit_value_tuple(self, tup: puya.ir.models.ValueTuple) -> T | None:
return None
def visit_conditional_branch(self, branch: puya.ir.models.ConditionalBranch) -> T | None:
return None
def visit_goto(self, goto: puya.ir.models.Goto) -> T | None:
return None
def visit_goto_nth(self, goto_nth: puya.ir.models.GotoNth) -> T | None:
return None
def visit_switch(self, switch: puya.ir.models.Switch) -> T | None:
return None
def visit_subroutine_return(self, retsub: puya.ir.models.SubroutineReturn) -> T | None:
return None
def visit_program_exit(self, exit_: puya.ir.models.ProgramExit) -> T | None:
return None
def visit_fail(self, fail: puya.ir.models.Fail) -> T | None:
return None
|
algorandfoundation/puya
|
src/puya/ir/visitor.py
|
Python
|
NOASSERTION
| 9,096 |
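IRVisitor and IRTraverser above follow the classic accept/visit double-dispatch pattern. A minimal standalone sketch of that pattern with toy node classes (illustrative only, unrelated to the actual IR model):
# Illustrative sketch: nodes dispatch to the visitor, the visitor drives traversal.
class Leaf:
    def accept(self, visitor: "CountingVisitor") -> None:
        visitor.visit_leaf(self)
class Pair:
    def __init__(self, left: Leaf, right: Leaf) -> None:
        self.left, self.right = left, right
    def accept(self, visitor: "CountingVisitor") -> None:
        visitor.visit_pair(self)
class CountingVisitor:
    def __init__(self) -> None:
        self.leaves = 0
    def visit_leaf(self, leaf: Leaf) -> None:
        self.leaves += 1
    def visit_pair(self, pair: Pair) -> None:
        pair.left.accept(self)  # traversal recurses via accept()
        pair.right.accept(self)
visitor = CountingVisitor()
Pair(Leaf(), Leaf()).accept(visitor)
assert visitor.leaves == 2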
from collections.abc import Iterable, Mapping
import attrs
from puya.ir import models
from puya.ir.visitor_mutator import IRMutator
@attrs.define
class MemoryReplacer(IRMutator):
_replacements: Mapping[models.Register, models.Register]
replaced: int = 0
@classmethod
def apply(
cls,
blocks: Iterable[models.BasicBlock],
*,
replacements: Mapping[models.Register, models.Register],
) -> int:
if not replacements:
return 0
replacer = cls(replacements=replacements)
for block in blocks:
replacer.visit_block(block)
return replacer.replaced
def visit_register(self, reg: models.Register) -> models.Register:
try:
replacement = self._replacements[reg]
except KeyError:
return reg
self.replaced += 1
return replacement
|
algorandfoundation/puya
|
src/puya/ir/visitor_mem_replacer.py
|
Python
|
NOASSERTION
| 886 |
import typing as t
import attrs
from puya.ir import models
from puya.ir.visitor import IRVisitor
@attrs.define
class IRMutator(IRVisitor[t.Any]):
def visit_block(self, block: models.BasicBlock) -> None:
new_phis = []
for phi in block.phis:
new_phi = self.visit_phi(phi)
if new_phi is not None:
new_phis.append(new_phi)
block.phis = new_phis
new_ops = []
for op in block.ops:
new_op = op.accept(self)
if new_op is not None:
new_ops.append(new_op)
block.ops = new_ops
if block.terminator is not None:
block.terminator = block.terminator.accept(self)
def visit_assignment(self, ass: models.Assignment) -> models.Assignment | None:
ass.targets = [self.visit_register(r) for r in ass.targets]
ass.source = ass.source.accept(self)
return ass
def visit_register(self, reg: models.Register) -> models.Register:
return reg
def visit_undefined(self, val: models.Undefined) -> models.Undefined:
return val
def visit_template_var(self, deploy_var: models.TemplateVar) -> models.TemplateVar:
return deploy_var
def visit_uint64_constant(self, const: models.UInt64Constant) -> models.UInt64Constant:
return const
def visit_biguint_constant(self, const: models.BigUIntConstant) -> models.BigUIntConstant:
return const
def visit_bytes_constant(self, const: models.BytesConstant) -> models.BytesConstant:
return const
def visit_address_constant(self, const: models.AddressConstant) -> models.AddressConstant:
return const
def visit_method_constant(self, const: models.MethodConstant) -> models.MethodConstant:
return const
def visit_itxn_constant(self, const: models.ITxnConstant) -> models.ITxnConstant:
return const
def visit_compiled_contract_reference(
self, const: models.CompiledContractReference
) -> models.CompiledContractReference:
return attrs.evolve(
const,
template_variables={
var: value.accept(self) for var, value in const.template_variables.items()
},
)
def visit_compiled_logicsig_reference(
self, const: models.CompiledLogicSigReference
) -> models.CompiledLogicSigReference:
return attrs.evolve(
const,
template_variables={
var: value.accept(self) for var, value in const.template_variables.items()
},
)
def visit_phi(self, phi: models.Phi) -> models.Phi | None:
phi.register = self.visit_register(phi.register)
phi.args = [self.visit_phi_argument(a) for a in phi.args]
return phi
def visit_phi_argument(self, arg: models.PhiArgument) -> models.PhiArgument:
arg.value = arg.value.accept(self)
return arg
def visit_intrinsic_op(self, intrinsic: models.Intrinsic) -> models.Intrinsic | None:
intrinsic.args = [a.accept(self) for a in intrinsic.args]
return intrinsic
def visit_inner_transaction_field(
self, field: models.InnerTransactionField
) -> models.InnerTransactionField | models.Intrinsic:
field.group_index = field.group_index.accept(self)
field.is_last_in_group = field.is_last_in_group.accept(self)
if field.array_index:
field.array_index = field.array_index.accept(self)
return field
def visit_invoke_subroutine(self, callsub: models.InvokeSubroutine) -> models.InvokeSubroutine:
callsub.args = [a.accept(self) for a in callsub.args]
return callsub
def visit_conditional_branch(self, branch: models.ConditionalBranch) -> models.ControlOp:
branch.condition = branch.condition.accept(self)
return branch
def visit_goto_nth(self, goto_nth: models.GotoNth) -> models.ControlOp:
goto_nth.value = goto_nth.value.accept(self)
return goto_nth
def visit_goto(self, goto: models.Goto) -> models.ControlOp:
return goto
def visit_switch(self, switch: models.Switch) -> models.ControlOp:
switch.value = switch.value.accept(self)
switch.cases = {case.accept(self): target for case, target in switch.cases.items()}
return switch
def visit_subroutine_return(self, retsub: models.SubroutineReturn) -> models.ControlOp:
retsub.result = [v.accept(self) for v in retsub.result]
return retsub
def visit_program_exit(self, exit_: models.ProgramExit) -> models.ControlOp:
exit_.result = exit_.result.accept(self)
return exit_
def visit_fail(self, fail: models.Fail) -> models.ControlOp:
return fail
def visit_value_tuple(self, tup: models.ValueTuple) -> models.ValueTuple:
tup.values = [v.accept(self) for v in tup.values]
return tup
|
algorandfoundation/puya
|
src/puya/ir/visitor_mutator.py
|
Python
|
NOASSERTION
| 4,908 |
import itertools
import typing
from collections.abc import Sequence, Set
import attrs
from puya.errors import InternalError
from puya.ir import models as ops
from puya.ir.visitor import IRTraverser
from puya.utils import StableSet
IrOp: typing.TypeAlias = ops.Op | ops.ControlOp | ops.Phi
@attrs.define(kw_only=True)
class _OpLifetime:
block: ops.BasicBlock
used: StableSet[ops.Register] = attrs.field(on_setattr=attrs.setters.frozen)
defined: StableSet[ops.Register] = attrs.field(on_setattr=attrs.setters.frozen)
successors: Sequence[IrOp] = attrs.field(on_setattr=attrs.setters.frozen)
live_in: StableSet[ops.Register] = attrs.field(factory=StableSet)
live_out: StableSet[ops.Register] = attrs.field(factory=StableSet)
@attrs.define
class _VlaTraverser(IRTraverser):
used: StableSet[ops.Register] = attrs.field(factory=StableSet)
defined: StableSet[ops.Register] = attrs.field(factory=StableSet)
@classmethod
def apply(cls, op: IrOp) -> tuple[StableSet[ops.Register], StableSet[ops.Register]]:
traverser = cls()
op.accept(traverser)
return traverser.used, traverser.defined
def visit_register(self, reg: ops.Register) -> None:
self.used.add(reg)
def visit_assignment(self, ass: ops.Assignment) -> None:
ass.source.accept(self)
self.defined = StableSet.from_iter(ass.targets)
def visit_phi(self, _phi: ops.Phi) -> None:
# WARNING: this is slightly trickier than it might seem for SSA,
# consider how this translates when coming out of SSA -
# the target register isn't defined here, but defined at
# the end of each predecessor block.
# Similarly, the arguments aren't live-in at this location necessarily.
raise InternalError("IR VLA not capable of handling SSA yet")
@attrs.define
class VariableLifetimeAnalysis:
"""Performs VLA analysis for a subroutine, providing a mapping of ops to sets of live local_ids
see https://www.classes.cs.uchicago.edu/archive/2004/spring/22620-1/docs/liveness.pdf"""
subroutine: ops.Subroutine
_op_lifetimes: dict[IrOp, _OpLifetime] = attrs.field(init=False)
@_op_lifetimes.default
def _op_lifetimes_factory(self) -> dict[IrOp, _OpLifetime]:
result = dict[IrOp, _OpLifetime]()
block_map = {b.id: next(b.all_ops) for b in self.subroutine.body}
for block in self.subroutine.body:
assert not block.phis
all_ops = list(block.all_ops)
for op, next_op in itertools.zip_longest(all_ops, all_ops[1:]):
used, defined = _VlaTraverser.apply(op)
if next_op is None:
# for last op, add first op of each successor block
successors = [block_map[s.id] for s in block.successors]
else:
successors = [next_op]
result[op] = _OpLifetime(
block=block,
used=used,
defined=defined,
successors=successors,
)
return result
def get_live_out_variables(self, op: IrOp) -> Set[ops.Register]:
return self._op_lifetimes[op].live_out
def get_live_in_variables(self, op: IrOp) -> Set[ops.Register]:
return self._op_lifetimes[op].live_in
@classmethod
def analyze(cls, subroutine: ops.Subroutine) -> typing.Self:
analysis = cls(subroutine)
analysis._analyze() # noqa: SLF001
return analysis
def _analyze(self) -> None:
changes = True
while changes:
changes = False
for n in self._op_lifetimes.values():
# For OUT, find out the union of previous variables
# in the IN set for each succeeding node of n.
# out[n] = U s ∈ succ[n] in[s]
live_out = StableSet[ops.Register]()
for s in n.successors:
live_out |= self._op_lifetimes[s].live_in
# in[n] = use[n] U (out[n] - def [n])
live_in = n.used | (live_out - n.defined)
if not (live_in == n.live_in and live_out == n.live_out):
n.live_in = live_in
n.live_out = live_out
changes = True
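# A minimal, self-contained sketch of the same backwards-liveness fix-point over
# plain sets, independent of the puya IR types above; the toy straight-line ops
# below (op1: a = 1, op2: b = a + 1, op3: return b) and their names are
# illustrative placeholders only.
def _toy_liveness_sketch() -> None:
    used = {"op1": set(), "op2": {"a"}, "op3": {"b"}}
    defined = {"op1": {"a"}, "op2": {"b"}, "op3": set()}
    successors = {"op1": ["op2"], "op2": ["op3"], "op3": []}
    live_in = {op: set() for op in used}
    live_out = {op: set() for op in used}
    changes = True
    while changes:
        changes = False
        for op in used:
            # out[n] = union of in[s] over each successor s of n
            new_out = set().union(*(live_in[s] for s in successors[op]))
            # in[n] = use[n] | (out[n] - def[n])
            new_in = used[op] | (new_out - defined[op])
            if new_in != live_in[op] or new_out != live_out[op]:
                live_in[op], live_out[op] = new_in, new_out
                changes = True
    # "a" must stay live across op1 -> op2, and "b" across op2 -> op3
    assert live_out["op1"] == {"a"}
    assert live_out["op2"] == {"b"}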
|
algorandfoundation/puya
|
src/puya/ir/vla.py
|
Python
|
NOASSERTION
| 4,360 |
import contextlib
import json
import logging
import os.path
import sys
import typing
from collections import Counter
from collections.abc import Iterator, Mapping, Sequence
from contextvars import ContextVar
from enum import IntEnum, StrEnum, auto
from io import StringIO
from pathlib import Path
import attrs
import structlog
from puya.parse import SourceLocation
class LogFormat(StrEnum):
default = auto()
json = auto()
@staticmethod
def from_string(s: str) -> "LogFormat":
try:
return LogFormat[s]
except KeyError as err:
raise ValueError from err
class LogLevel(IntEnum):
notset = logging.NOTSET
debug = logging.DEBUG
info = logging.INFO
warning = logging.WARNING
error = logging.ERROR
critical = logging.CRITICAL
def __str__(self) -> str:
return self.name
@staticmethod
def from_string(s: str) -> "LogLevel":
try:
return LogLevel[s]
except KeyError as err:
raise ValueError from err
@attrs.frozen
class Log:
level: LogLevel
message: str
location: SourceLocation | None
@property
def file(self) -> Path | None:
return None if self.location is None else self.location.file
@property
def line(self) -> int | None:
return None if self.location is None else self.location.line
@attrs.define
class LoggingContext:
logs: list[Log] = attrs.field(factory=list)
sources_by_path: Mapping[Path, Sequence[str] | None] | None = None
def _log_level_counts(self) -> Mapping[LogLevel, int]:
return Counter(log.level for log in self.logs)
@property
def num_errors(self) -> int:
level_counts = self._log_level_counts()
return sum(count for level, count in level_counts.items() if level >= LogLevel.error)
def exit_if_errors(self) -> None:
level_counts = self._log_level_counts()
if level_counts[LogLevel.critical]:
sys.exit(2)
elif level_counts[LogLevel.error]:
sys.exit(1)
_current_ctx: ContextVar[LoggingContext] = ContextVar("current_ctx")
class PuyaJsonRender(structlog.processors.JSONRenderer):
def __init__(self, *, base_path: str) -> None:
super().__init__()
self.base_path = base_path + os.path.sep
def _location_json(
self, location: SourceLocation | None
) -> Mapping[str, str | int | None] | None:
if not location or not location.file:
return None
file = str(location.file)
if file.startswith(self.base_path):
file = file[len(self.base_path) :]
return {
"file": file,
"line": location.line,
"end_line": location.end_line,
"column": location.column,
"end_column": location.end_column,
}
def __call__(
self,
_logger: structlog.typing.WrappedLogger,
_name: str,
event_dict: structlog.typing.EventDict,
) -> str:
# force event to str for compatibility with standard library
event = event_dict.pop("event", None)
if not isinstance(event, str):
event = str(event)
important: bool = event_dict.pop("important", False)
location: SourceLocation | None = event_dict.pop("location", None)
level = event_dict.pop("level", "info")
return json.dumps(
{
"level": level,
"location": self._location_json(location),
"message": event,
"important": important,
}
)
class PuyaConsoleRender(structlog.dev.ConsoleRenderer):
def __init__(self, *, colors: bool, base_path: str):
super().__init__(colors=colors)
self.level_to_color = self.get_default_level_styles(colors)
self.base_path = base_path
if not self.base_path.endswith(
os.path.sep
        ):  # TODO: can we always append the path separator?
self.base_path += os.path.sep
def _location_as_link(self, location: SourceLocation | None) -> str:
if not location or not location.file:
return ""
file = str(location.file)
if file.startswith(self.base_path):
file = file[len(self.base_path) :]
line = str(location.line) if location.line else "1"
col = f":{location.column + 1}" if location.column else ""
return f"{file}:{line}{col}"
def __call__(
self,
_logger: structlog.typing.WrappedLogger,
_name: str,
event_dict: structlog.typing.EventDict,
) -> str:
# force event to str for compatibility with standard library
event = event_dict.pop("event", None)
if not isinstance(event, str):
event = str(event)
lines = [event]
related_errors = event_dict.pop("related_lines", None)
if related_errors:
assert isinstance(related_errors, list)
lines.extend(related_errors)
important: bool = event_dict.pop("important", False)
location: SourceLocation | None = event_dict.pop("location", None)
if location and location.file is None:
location = None
location_as_link = self._location_as_link(location) if location else ""
level = event_dict.pop("level", "info")
align_related_lines = " " * (len(location_as_link) + 1 + len(level) + 1)
sio = StringIO()
reset_colour = self._styles.reset
if important:
sio.write(self._styles.bright)
reset_colour += self._styles.bright
for idx, line in enumerate(lines):
if idx:
sio.write("\n")
sio.write(align_related_lines)
else:
if location:
sio.write(self._styles.logger_name)
location_link = self._location_as_link(location)
sio.write(location_link)
sio.write(" ")
sio.write(reset_colour)
sio.write(self.level_to_color.get(level, ""))
sio.write(level)
sio.write(": ")
sio.write(reset_colour)
sio.write(line)
sio.write(self._styles.reset)
stack = event_dict.pop("stack", None)
exc = event_dict.pop("exception", None)
exc_info = event_dict.pop("exc_info", None)
event_dict_keys: typing.Iterable[str] = event_dict.keys()
if self._sort_keys:
event_dict_keys = sorted(event_dict_keys)
sio.write(
" ".join(
self._styles.kv_key
+ key
+ self._styles.reset
+ "="
+ self._styles.kv_value
+ self._repr(event_dict[key])
+ self._styles.reset
for key in event_dict_keys
)
)
if stack is not None:
sio.write("\n" + stack)
if exc_info or exc is not None:
sio.write("\n\n" + "=" * 79 + "\n")
if exc_info:
exc_info = figure_out_exc_info(exc_info)
self._exception_formatter(sio, exc_info)
elif exc is not None:
sio.write("\n" + exc)
return sio.getvalue()
# copied from structlog.dev._figure_out_exc_info
def figure_out_exc_info(
v: BaseException | structlog.typing.ExcInfo | bool,
) -> structlog.typing.ExcInfo | bool:
"""
Depending on the Python version will try to do the smartest thing possible
to transform *v* into an ``exc_info`` tuple.
"""
if isinstance(v, BaseException):
return type(v), v, v.__traceback__
if isinstance(v, tuple):
return v
if v:
return sys.exc_info() # type: ignore[return-value]
return v
@attrs.define
class FilterByLogLevel:
min_log_level: LogLevel
def __call__(
self,
_logger: structlog.typing.WrappedLogger,
method: str,
event_dict: structlog.typing.EventDict,
) -> structlog.typing.EventDict:
if LogLevel[method] < self.min_log_level:
raise structlog.DropEvent
return event_dict
def configure_logging(
*,
min_log_level: LogLevel = LogLevel.notset,
cache_logger: bool = True,
log_format: LogFormat = LogFormat.default,
) -> None:
if cache_logger and structlog.is_configured():
raise ValueError(
"Logging can not be configured more than once if using cache_logger = True"
)
base_path = str(Path.cwd()) # TODO: don't assume this?
match log_format:
case LogFormat.json:
log_renderer: structlog.typing.Processor = PuyaJsonRender(base_path=base_path)
case LogFormat.default:
log_renderer = PuyaConsoleRender(
colors="NO_COLOR" not in os.environ, base_path=base_path
)
case never:
typing.assert_never(never)
processors: list[structlog.typing.Processor] = [
structlog.contextvars.merge_contextvars,
structlog.processors.add_log_level,
structlog.processors.StackInfoRenderer(),
log_renderer,
]
if min_log_level != LogLevel.notset:
# filtering via a processor instead of via the logger like
# structlog.make_filtering_bound_logger(min_log_level.value)
# so that structlog.testing.capture_logs() still works in test cases
processors.insert(0, FilterByLogLevel(min_log_level))
structlog.configure(
processors=processors,
context_class=dict,
logger_factory=structlog.PrintLoggerFactory(),
cache_logger_on_first_use=cache_logger,
)
class _Logger:
def __init__(self, name: str, initial_values: dict[str, typing.Any]):
self._logger = structlog.get_logger(name, **initial_values)
def debug(
self,
event: object,
*args: typing.Any,
location: SourceLocation | None = None,
**kwargs: typing.Any,
) -> None:
self._report(LogLevel.debug, event, *args, location=location, **kwargs)
def info(
self,
event: object,
*args: typing.Any,
location: SourceLocation | None = None,
**kwargs: typing.Any,
) -> None:
self._report(LogLevel.info, event, *args, location=location, **kwargs)
def warning(
self,
event: object,
*args: typing.Any,
location: SourceLocation | None = None,
**kwargs: typing.Any,
) -> None:
self._report(LogLevel.warning, event, *args, location=location, **kwargs)
def error(
self,
event: object,
*args: typing.Any,
location: SourceLocation | None = None,
**kwargs: typing.Any,
) -> None:
self._report(LogLevel.error, event, *args, location=location, **kwargs)
def exception(
self,
event: object,
*args: typing.Any,
location: SourceLocation | None = None,
**kwargs: typing.Any,
) -> None:
kwargs.setdefault("exc_info", True)
self._report(LogLevel.critical, event, *args, location=location, **kwargs)
def critical(
self,
event: object,
*args: typing.Any,
location: SourceLocation | None = None,
**kwargs: typing.Any,
) -> None:
self._report(LogLevel.critical, event, *args, location=location, **kwargs)
def log(
self,
level: LogLevel,
event: object,
*args: typing.Any,
location: SourceLocation | None = None,
**kwargs: typing.Any,
) -> None:
self._report(level, event, *args, location=location, **kwargs)
def _report(
self,
level: LogLevel,
event: object,
*args: typing.Any,
location: SourceLocation | None = None,
**kwargs: typing.Any,
) -> None:
log_ctx = _current_ctx.get(None)
if (
level >= LogLevel.error
and location
and log_ctx
and log_ctx.sources_by_path
and location.file
):
file_source = log_ctx.sources_by_path.get(location.file)
if file_source is not None:
kwargs["related_lines"] = _get_pretty_source(file_source, location)
self._logger.log(level, event, *args, location=location, **kwargs)
if log_ctx:
if isinstance(event, str) and args:
message = event % args
else:
message = str(event)
log_ctx.logs.append(Log(level, message, location))
def _get_pretty_source(
file_source: Sequence[str], location: SourceLocation
) -> Sequence[str] | None:
lines = file_source[location.line - 1 : location.end_line]
if len(lines) != location.line_count:
logger = get_logger(__name__)
logger.warning(f"source length mismatch for {location}")
return None
try:
(source_line,) = lines
except ValueError:
# source line is followed by additional lines, don't bother annotating columns
return lines
# Shifts column after tab expansion
column = len(source_line[: location.column].expandtabs())
end_column = len(source_line[: location.end_column].expandtabs())
return [
source_line.expandtabs(),
" " * column + f"^{'~' * max(end_column - column - 1, 0)}",
]
def get_logger(name: str, **initial_values: typing.Any) -> _Logger:
return _Logger(name, initial_values)
@contextlib.contextmanager
def logging_context() -> Iterator[LoggingContext]:
ctx = LoggingContext()
restore = _current_ctx.set(ctx)
try:
yield ctx
finally:
_current_ctx.reset(restore)
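# A hedged usage sketch of the API above: configure structlog once, collect Log
# records inside a logging_context, then exit if any errors were recorded. The
# logger name and message below are placeholders only.
def _example_logging_usage() -> None:
    configure_logging(min_log_level=LogLevel.info, cache_logger=False)
    example_logger = get_logger("example.module")
    with logging_context() as ctx:
        example_logger.warning("something looks off", location=None)
        assert ctx.num_errors == 0  # warnings do not count towards num_errors
    ctx.exit_if_errors()  # nothing at error level or above, so this returns normally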
|
algorandfoundation/puya
|
src/puya/log.py
|
Python
|
NOASSERTION
| 13,780 |
from pathlib import Path
import attrs
import cattrs.preconf.json
from puya import log
from puya.awst import (
nodes as awst_nodes,
serialize,
)
from puya.compile import awst_to_teal
from puya.errors import log_exceptions
from puya.options import PuyaOptions
from puya.program_refs import ContractReference, LogicSigReference
logger = log.get_logger(__name__)
@attrs.frozen(kw_only=True)
class _PuyaOptionsWithCompilationSet(PuyaOptions):
compilation_set: dict[str, Path]
def main(*, options_json: str, awst_json: str, source_annotations_json: str | None) -> None:
with log.logging_context() as log_ctx, log_exceptions():
json_converter = cattrs.preconf.json.make_converter()
sources_by_path = {}
if source_annotations_json:
sources_by_path = json_converter.loads(
source_annotations_json, dict[Path, list[str] | None]
)
log_ctx.sources_by_path = sources_by_path
awst = serialize.awst_from_json(awst_json)
options = json_converter.loads(options_json, _PuyaOptionsWithCompilationSet)
compilation_set = dict[ContractReference | LogicSigReference, Path]()
awst_lookup = {n.id: n for n in awst}
for target_id, path in options.compilation_set.items():
match awst_lookup.get(target_id):
case awst_nodes.Contract(id=contract_id):
compilation_set[contract_id] = path
case awst_nodes.LogicSignature(id=lsig_id):
compilation_set[lsig_id] = path
case None:
logger.error(f"compilation target {target_id!r} not found in AWST")
case other:
logger.error(f"unexpected compilation target type: {type(other).__name__}")
awst_to_teal(log_ctx, options, compilation_set, sources_by_path, awst)
# note: needs to be outside the with block
log_ctx.exit_if_errors()
|
algorandfoundation/puya
|
src/puya/main.py
|
Python
|
NOASSERTION
| 1,947 |
algorandfoundation/puya
|
src/puya/mir/__init__.py
|
Python
|
NOASSERTION
| 0 |
|
import contextlib
from collections.abc import Iterator
import attrs
from puya.errors import InternalError
@attrs.define
class AlignedWriter:
_headers: list[str] = attrs.field(factory=list)
_lines: list[list[str] | str] = attrs.field(factory=list)
_current_line: list[str] = attrs.field(factory=list)
_ignore_current_line: bool = False
_padding: dict[int, int] = attrs.field(factory=dict)
_indent: str = ""
omit_empty_columns: bool = True
@property
def ignore_current_line(self) -> bool:
return self._ignore_current_line
@property
def current_indent(self) -> str:
return self._indent
@contextlib.contextmanager
def indent(self) -> Iterator[None]:
indent_width = 4
original_indent = self._indent
self._indent += " " * indent_width
try:
yield
finally:
self._indent = original_indent
def add_header(self, header: str, padding: int = 1) -> None:
self._padding[len(self._headers)] = padding
self._headers.append(header)
def append(self, part: str) -> None:
self._current_line.append(part)
def new_line(self) -> None:
parts = self._current_line
if not self._ignore_current_line:
if parts and self._indent:
parts[0] = f"{self._indent}{parts[0]}"
self._lines.append(parts)
self._ignore_current_line = False
self._current_line = []
def append_line(self, line: str) -> None:
if self._current_line or self._ignore_current_line:
raise InternalError(
"Cannot append a new line while a current line is in progress, missing new_line()?"
)
self._lines.append(line)
def ignore_line(self) -> None:
self._ignore_current_line = True
def _join_columns(self, line: list[str], widths: dict[int, int]) -> str:
return "".join(
part.ljust(widths.get(column, 0))
for column, part in enumerate(line)
if widths.get(column, 0) or not self.omit_empty_columns
).rstrip()
def write(self) -> list[str]:
widths = dict[int, int]()
all_lines = self._lines.copy()
for parts in all_lines:
if isinstance(parts, list):
for column, part in enumerate(parts):
widths[column] = max(widths.get(column, 0), len(part))
for column, width in widths.items():
if width == 0 and self.omit_empty_columns:
continue
if column < len(self._headers):
width = max(width, len(self._headers[column]))
widths[column] = width + self._padding.get(column, 1)
if self._headers:
all_lines.insert(0, self._headers)
return [
line if isinstance(line, str) else self._join_columns(line, widths)
for line in all_lines
]
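# A small, hypothetical usage sketch of AlignedWriter: two header columns and two
# data rows; write() pads each column to its widest cell (plus the header padding).
def _aligned_writer_sketch() -> list[str]:
    writer = AlignedWriter()
    writer.add_header("// Op")
    writer.add_header("Stack (out)")
    writer.append("int 1")
    writer.append("x")
    writer.new_line()
    with writer.indent():
        writer.append("pop 1")
        writer.append("")
        writer.new_line()
    # headers come first, then the rows with columns left-justified to a common width
    return writer.write()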
|
algorandfoundation/puya
|
src/puya/mir/aligned_writer.py
|
Python
|
NOASSERTION
| 2,943 |
import typing
from collections.abc import Sequence
import attrs
from puya import log
from puya.errors import CodeError, InternalError
from puya.ir import models as ir
from puya.ir.types_ import AVMBytesEncoding
from puya.ir.visitor import IRVisitor
from puya.mir import models
from puya.mir.context import ProgramMIRContext
from puya.utils import biguint_bytes_eval
logger = log.get_logger(__name__)
@attrs.define
class MemoryIRBuilder(IRVisitor[None]):
context: ProgramMIRContext = attrs.field(on_setattr=attrs.setters.frozen)
current_subroutine: ir.Subroutine
is_main: bool
current_ops: list[models.Op] = attrs.field(factory=list)
terminator: models.ControlOp | None = None
active_op: ir.Op | ir.ControlOp | None = None
def _add_op(self, op: models.Op) -> None:
self.current_ops.append(op)
def _terminate(self, op: models.ControlOp) -> None:
assert self.terminator is None
self.terminator = op
def _get_block_name(self, block: ir.BasicBlock) -> str:
if block is self.current_subroutine.entry:
return self.context.subroutine_names[self.current_subroutine]
assert block in self.current_subroutine.body
comment = (block.comment or "block").replace(" ", "_")
subroutine_name = self.context.subroutine_names[self.current_subroutine]
return f"{subroutine_name}_{comment}@{block.id}"
def visit_assignment(self, ass: ir.Assignment) -> None:
ass.source.accept(self)
        # rightmost target is top of stack
for target in reversed(ass.targets):
try:
param_idx = self.current_subroutine.parameters.index(target)
except ValueError:
self._add_op(
models.AbstractStore(
local_id=target.local_id,
source_location=ass.source_location,
atype=target.atype,
)
)
else:
index = param_idx - len(self.current_subroutine.parameters)
self._add_op(
models.StoreParam(
local_id=target.local_id,
index=index,
source_location=ass.source_location,
atype=target.atype,
)
)
def visit_register(self, reg: ir.Register) -> None:
produces = (reg.local_id,)
if isinstance(self.active_op, ir.Assignment):
if reg is self.active_op.source:
(target,) = self.active_op.targets
produces = (target.local_id,)
elif (
isinstance(self.active_op.source, ir.ValueTuple)
and reg in self.active_op.source.values
):
index = self.active_op.source.values.index(reg)
target = self.active_op.targets[index]
produces = (target.local_id,)
try:
param_idx = self.current_subroutine.parameters.index(reg)
except ValueError:
self._add_op(
models.AbstractLoad(
local_id=reg.local_id,
produces=produces,
source_location=(self.active_op or reg).source_location,
atype=reg.atype,
)
)
else:
index = param_idx - len(self.current_subroutine.parameters)
if produces[0] == reg.local_id:
produces = (f"{produces[0]} (copy)",)
self._add_op(
models.LoadParam(
local_id=reg.local_id,
produces=produces,
index=index,
source_location=(self.active_op or reg).source_location,
atype=reg.atype,
)
)
def visit_undefined(self, val: ir.Undefined) -> None:
self._add_op(
models.Undefined(
atype=val.atype,
source_location=val.source_location,
)
)
def visit_template_var(self, deploy_var: ir.TemplateVar) -> None:
self._add_op(
models.TemplateVar(
name=deploy_var.name,
atype=deploy_var.atype,
source_location=deploy_var.source_location,
)
)
def visit_uint64_constant(self, const: ir.UInt64Constant) -> None:
self._add_op(
models.Int(
const.value if not const.teal_alias else const.teal_alias,
source_location=const.source_location,
)
)
def visit_biguint_constant(self, const: ir.BigUIntConstant) -> None:
big_uint_bytes = biguint_bytes_eval(const.value)
self._add_op(
models.Byte(
big_uint_bytes,
source_location=const.source_location,
encoding=AVMBytesEncoding.base16,
)
)
def visit_bytes_constant(self, const: ir.BytesConstant) -> None:
self._add_op(
models.Byte(
const.value, encoding=const.encoding, source_location=const.source_location
)
)
def visit_address_constant(self, const: ir.AddressConstant) -> None:
self._add_op(
models.Address(
const.value,
source_location=const.source_location,
)
)
def visit_method_constant(self, const: ir.MethodConstant) -> None:
self._add_op(
models.Method(
const.value,
source_location=const.source_location,
)
)
def visit_intrinsic_op(self, intrinsic: ir.Intrinsic) -> None:
discard_results = intrinsic is self.active_op
if intrinsic.op.code.startswith("box_"):
try:
box_key = intrinsic.args[0]
            except IndexError:
raise InternalError("box key arg not found", intrinsic.source_location) from None
if isinstance(box_key, ir.BytesConstant) and not box_key.value:
raise CodeError("AVM does not support empty box keys", intrinsic.source_location)
for arg in intrinsic.args:
arg.accept(self)
produces = len(intrinsic.op_signature.returns)
self._add_op(
models.IntrinsicOp(
op_code=intrinsic.op.code,
immediates=intrinsic.immediates,
source_location=intrinsic.source_location,
consumes=len(intrinsic.op_signature.args),
produces=_produces_from_op(intrinsic.op.code, produces, self.active_op),
error_message=intrinsic.error_message,
)
)
if discard_results and produces:
self._add_op(models.Pop(produces))
def visit_invoke_subroutine(self, callsub: ir.InvokeSubroutine) -> None:
discard_results = callsub is self.active_op
target = callsub.target
callsub_op = models.CallSub(
target=self.context.subroutine_names[target],
parameters=len(target.parameters),
returns=len(target.returns),
produces=_produces_from_op(
self.context.subroutine_names[target], len(target.returns), self.active_op
),
source_location=callsub.source_location,
)
# prepare args
for arg in callsub.args:
arg.accept(self)
# call sub
self._add_op(callsub_op)
if discard_results and target.returns:
num_returns = len(target.returns)
self._add_op(models.Pop(num_returns))
def visit_conditional_branch(self, branch: ir.ConditionalBranch) -> None:
branch.condition.accept(self)
self._terminate(
models.ConditionalBranch(
nonzero_target=self._get_block_name(branch.non_zero),
zero_target=self._get_block_name(branch.zero),
source_location=branch.source_location,
)
)
def visit_goto(self, goto: ir.Goto) -> None:
self._terminate(
models.Goto(
target=self._get_block_name(goto.target),
source_location=goto.source_location,
)
)
def visit_goto_nth(self, goto_nth: ir.GotoNth) -> None:
block_labels = [self._get_block_name(block) for block in goto_nth.blocks]
goto_nth.value.accept(self)
self._terminate(
models.Switch(
switch_targets=block_labels,
default_target=self._get_block_name(goto_nth.default),
source_location=goto_nth.source_location,
)
)
def visit_switch(self, switch: ir.Switch) -> None:
blocks = list[str]()
for case, block in switch.cases.items():
case.accept(self)
block_name = self._get_block_name(block)
blocks.append(block_name)
switch.value.accept(self)
self._terminate(
models.Match(
match_targets=blocks,
default_target=self._get_block_name(switch.default),
source_location=switch.source_location,
)
)
def visit_subroutine_return(self, retsub: ir.SubroutineReturn) -> None:
for r in retsub.result:
r.accept(self)
if self.is_main:
assert len(retsub.result) == 1
self._terminate(models.ProgramExit(source_location=retsub.source_location))
else:
self._terminate(
models.RetSub(returns=len(retsub.result), source_location=retsub.source_location)
)
def visit_program_exit(self, exit_: ir.ProgramExit) -> None:
exit_.result.accept(self)
self._terminate(models.ProgramExit(source_location=exit_.source_location))
def visit_fail(self, fail: ir.Fail) -> None:
self._terminate(
models.Err(error_message=fail.error_message, source_location=fail.source_location)
)
def lower_block_to_mir(self, block: ir.BasicBlock) -> models.MemoryBasicBlock:
self.current_ops = list[models.Op]()
self.terminator = None
for op in block.all_ops:
assert not isinstance(op, ir.Phi)
self.active_op = op
op.accept(self)
assert self.terminator is not None
block_name = self._get_block_name(block)
predecessors = [self._get_block_name(b) for b in block.predecessors]
assert block.id is not None
return models.MemoryBasicBlock(
id=block.id,
block_name=block_name,
mem_ops=self.current_ops,
terminator=self.terminator,
predecessors=predecessors,
source_location=block.source_location,
)
def visit_compiled_contract_reference(self, const: ir.CompiledContractReference) -> None:
_unexpected_node(const)
def visit_compiled_logicsig_reference(self, const: ir.CompiledLogicSigReference) -> None:
_unexpected_node(const)
def visit_value_tuple(self, tup: ir.ValueTuple) -> None:
_unexpected_node(tup)
def visit_itxn_constant(self, const: ir.ITxnConstant) -> None:
_unexpected_node(const)
def visit_inner_transaction_field(self, field: ir.InnerTransactionField) -> None:
_unexpected_node(field)
def visit_phi(self, phi: ir.Phi) -> None:
_unexpected_node(phi)
def visit_phi_argument(self, arg: ir.PhiArgument) -> None:
_unexpected_node(arg)
def _unexpected_node(node: ir.IRVisitable) -> typing.Never:
raise InternalError(
f"Encountered node of type {type(node).__name__!r} during codegen"
f" - should have been eliminated in prior stages",
node.source_location,
)
def _produces_from_op(
prefix: str, size: int, maybe_assignment: ir.IRVisitable | None
) -> Sequence[str]:
produces = models.produces_from_desc(prefix, size)
if isinstance(maybe_assignment, ir.Assignment):
produces = [r.local_id for r in maybe_assignment.targets]
return produces
|
algorandfoundation/puya
|
src/puya/mir/builder.py
|
Python
|
NOASSERTION
| 12,186 |
from collections.abc import Mapping
import attrs
from puya.context import ArtifactCompileContext
from puya.ir import models as ir
from puya.mir import models
from puya.mir.vla import VariableLifetimeAnalysis
from puya.utils import attrs_extend
@attrs.define(kw_only=True)
class ProgramMIRContext(ArtifactCompileContext):
program: ir.Program
subroutine_names: Mapping[ir.Subroutine, str] = attrs.field(init=False)
@subroutine_names.default
def _get_short_subroutine_names(self) -> dict[ir.Subroutine, str]:
"""Return a mapping of unique TEAL names for all subroutines in program, while attempting
to use the shortest name possible"""
names = {"main": self.program.main}
for subroutine in self.program.subroutines:
if subroutine.short_name and subroutine.short_name not in names:
name = subroutine.short_name
else:
assert subroutine.id not in names
name = subroutine.id
names[name] = subroutine
return {v: k for k, v in names.items()}
def for_subroutine(self, subroutine: models.MemorySubroutine) -> "SubroutineCodeGenContext":
return attrs_extend(SubroutineCodeGenContext, self, subroutine=subroutine)
@attrs.define(frozen=False)
class SubroutineCodeGenContext(ProgramMIRContext):
subroutine: models.MemorySubroutine
_vla: VariableLifetimeAnalysis | None = None
@property
def vla(self) -> VariableLifetimeAnalysis:
if self._vla is None:
self._vla = VariableLifetimeAnalysis.analyze(self.subroutine)
return self._vla
def invalidate_vla(self) -> None:
self._vla = None
|
algorandfoundation/puya
|
src/puya/mir/context.py
|
Python
|
NOASSERTION
| 1,685 |
from puya.context import ArtifactCompileContext
from puya.ir import models as ir
from puya.mir import models
from puya.mir.builder import MemoryIRBuilder
from puya.mir.context import ProgramMIRContext
from puya.mir.stack_allocation import global_stack_allocation
from puya.utils import attrs_extend
def program_ir_to_mir(context: ArtifactCompileContext, program_ir: ir.Program) -> models.Program:
ctx = attrs_extend(ProgramMIRContext, context, program=program_ir)
result = models.Program(
kind=program_ir.kind,
main=_lower_subroutine_to_mir(ctx, program_ir.main, is_main=True),
subroutines=[
_lower_subroutine_to_mir(ctx, ir_sub, is_main=False)
for ir_sub in program_ir.subroutines
],
avm_version=program_ir.avm_version,
)
global_stack_allocation(ctx, result)
return result
def _lower_subroutine_to_mir(
context: ProgramMIRContext, subroutine: ir.Subroutine, *, is_main: bool
) -> models.MemorySubroutine:
builder = MemoryIRBuilder(context=context, current_subroutine=subroutine, is_main=is_main)
body = [builder.lower_block_to_mir(block) for block in subroutine.body]
signature = models.Signature(
name=subroutine.id,
parameters=[
models.Parameter(name=p.name, local_id=p.local_id, atype=p.atype)
for p in subroutine.parameters
],
returns=[r.avm_type for r in subroutine.returns],
)
return models.MemorySubroutine(
id=context.subroutine_names[subroutine],
signature=signature,
is_main=is_main,
body=body,
source_location=subroutine.source_location,
)
|
algorandfoundation/puya
|
src/puya/mir/main.py
|
Python
|
NOASSERTION
| 1,665 |
from __future__ import annotations
import abc
import typing
import typing as t
from functools import cached_property
import attrs
from puya.avm import AVMType
from puya.errors import InternalError
from puya.ir.utils import format_bytes, format_error_comment
from puya.program_refs import ProgramKind
if t.TYPE_CHECKING:
from collections.abc import Iterator, Mapping, Sequence
from puya.ir.types_ import AVMBytesEncoding
from puya.mir.visitor import MIRVisitor
from puya.parse import SourceLocation
_T = t.TypeVar("_T")
@attrs.frozen(kw_only=True, eq=False, str=False)
class BaseOp(abc.ABC):
error_message: str | None = None
source_location: SourceLocation | None = None
consumes: int
"""How many values are removed from the top of l-stack
Does not take into account any manipulations lower in the stack e.g. from Load*Stack"""
produces: Sequence[str]
"""The local ids that are appended to the l-stack
Does not take into account any manipulations lower in the stack e.g. from Store*Stack"""
@abc.abstractmethod
def accept(self, visitor: MIRVisitor[_T]) -> _T: ...
def _is_single_item(_: object, __: object, value: Sequence[str]) -> None:
assert len(value) == 1, "expected single item"
@attrs.frozen(eq=False)
class Op(BaseOp, abc.ABC):
@abc.abstractmethod
def __str__(self) -> str: ...
@attrs.frozen(eq=False)
class Int(Op):
value: int | str
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (str(self.value),)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_int(self)
def __str__(self) -> str:
return f"int {self.value}"
@attrs.frozen(eq=False)
class Byte(Op):
value: bytes
encoding: AVMBytesEncoding
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (format_bytes(self.value, self.encoding),)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_byte(self)
def __str__(self) -> str:
return f"byte {format_bytes(self.value, self.encoding)}"
@attrs.frozen(eq=False)
class Undefined(Op):
atype: typing.Literal[AVMType.bytes, AVMType.uint64]
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return ("undefined",)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_undefined(self)
def __str__(self) -> str:
return "undefined"
@attrs.frozen(eq=False)
class TemplateVar(Op):
name: str
atype: AVMType
op_code: typing.Literal["int", "byte"] = attrs.field(init=False)
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (self.name,)
@op_code.default
def _default_opcode(self) -> typing.Literal["int", "byte"]:
match self.atype:
case AVMType.bytes:
return "byte"
case AVMType.uint64:
return "int"
case _:
raise InternalError(
f"Unsupported atype for TemplateVar: {self.atype}", self.source_location
)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_template_var(self)
def __str__(self) -> str:
return f"{self.op_code} {self.name}"
@attrs.frozen(eq=False)
class Address(Op):
value: str
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (f"Address({self.value})",)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_address(self)
def __str__(self) -> str:
return f'addr "{self.value}"'
@attrs.frozen(eq=False)
class Method(Op):
value: str
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (f"Method({self.value})",)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_method(self)
def __str__(self) -> str:
return f"method {self.value}"
@attrs.frozen(eq=False)
class Comment(Op):
comment: str
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_comment(self)
def __str__(self) -> str:
return f"// {self.comment}"
@attrs.frozen(kw_only=True, eq=False)
class StoreOp(Op, abc.ABC):
"""An op for storing values"""
local_id: str
atype: AVMType = attrs.field()
@atype.validator
def _validate_not_any(self, _attribute: object, atype: AVMType) -> None:
if atype is AVMType.any:
raise InternalError(f"Register has type any: {self}", self.source_location)
@attrs.frozen(kw_only=True, eq=False)
class LoadOp(Op, abc.ABC):
"""An op for loading values"""
local_id: str
atype: AVMType = attrs.field()
@atype.validator
def _validate_not_any(self, _attribute: object, atype: AVMType) -> None:
if atype is AVMType.any:
raise InternalError(f"Register has type any: {self}", self.source_location)
@attrs.frozen(eq=False)
class AbstractStore(StoreOp):
consumes: int = attrs.field(default=1, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_abstract_store(self)
def __str__(self) -> str:
return f"v-store {self.local_id}"
@attrs.frozen(eq=False)
class AbstractLoad(LoadOp):
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (self.local_id,)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_abstract_load(self)
def __str__(self) -> str:
return f"v-load {self.local_id}"
@attrs.frozen(eq=False, kw_only=True)
class StoreLStack(StoreOp):
depth: int = attrs.field(validator=attrs.validators.ge(0))
copy: bool
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field()
@produces.default
def _produces(self) -> Sequence[str]:
return (f"{self.local_id} (copy)",) if self.copy else ()
@produces.validator
def _validate_produces(self, _: object, value: Sequence[str]) -> None:
assert len(value) == (1 if self.copy else 0), "invalid produces size"
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_store_l_stack(self)
def __str__(self) -> str:
op = "l-store-copy" if self.copy else "l-store"
return f"{op} {self.local_id} {self.depth}"
@attrs.frozen(eq=False)
class LoadLStack(LoadOp):
copy: bool
consumes: int = attrs.field(init=False, default=0)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
# depth can only be defined after koopmans pass and dead store removal
depth: int | None = None
@produces.default
def _produces(self) -> Sequence[str]:
produces = self.local_id
if self.copy:
produces += " (copy)"
return (produces,)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_load_l_stack(self)
def __str__(self) -> str:
depth = "" if self.depth is None else f" {self.depth}"
op = "l-load-copy" if self.copy else "l-load"
return f"{op} {self.local_id}{depth}"
@attrs.frozen(eq=False)
class StoreXStack(StoreOp):
depth: int = attrs.field(validator=attrs.validators.ge(0))
consumes: int = attrs.field(default=1, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_store_x_stack(self)
def __str__(self) -> str:
return f"x-store {self.local_id}"
@attrs.frozen(eq=False)
class LoadXStack(LoadOp):
depth: int = attrs.field(validator=attrs.validators.ge(0))
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (self.local_id,)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_load_x_stack(self)
def __str__(self) -> str:
return f"x-load {self.local_id}"
@attrs.frozen(eq=False)
class StoreFStack(StoreOp):
depth: int = attrs.field(validator=attrs.validators.ge(0))
frame_index: int = attrs.field(validator=attrs.validators.ge(0))
insert: bool = False
consumes: int = attrs.field(default=1, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_store_f_stack(self)
def __str__(self) -> str:
return f"f-store {self.local_id}"
@attrs.frozen(eq=False)
class LoadFStack(LoadOp):
depth: int = attrs.field(validator=attrs.validators.ge(0))
frame_index: int = attrs.field(validator=attrs.validators.ge(0))
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (f"{self.local_id} (copy)",)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_load_f_stack(self)
def __str__(self) -> str:
return f"f-load {self.local_id}"
@attrs.frozen(eq=False)
class LoadParam(LoadOp):
index: int
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(validator=_is_single_item)
@produces.default
def _produces(self) -> Sequence[str]:
return (f"{self.local_id} (copy)",)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_load_param(self)
def __str__(self) -> str:
return f"p-load {self.local_id}"
@attrs.frozen(eq=False)
class StoreParam(StoreOp):
index: int
consumes: int = attrs.field(default=1, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_store_param(self)
def __str__(self) -> str:
return f"p-store {self.local_id}"
@attrs.frozen(eq=False)
class Pop(Op):
n: int
consumes: int = attrs.field(init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
@consumes.default
def _consumes(self) -> int:
return self.n
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_pop(self)
def __str__(self) -> str:
return f"pop {self.n}"
@attrs.frozen(eq=False)
class Allocate(Op):
bytes_vars: Sequence[str]
uint64_vars: Sequence[str]
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
@property
def allocate_on_entry(self) -> Sequence[str]:
return [*self.bytes_vars, *self.uint64_vars]
@property
def num_bytes(self) -> int:
return len(self.bytes_vars)
@property
def num_uints(self) -> int:
return len(self.uint64_vars)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_allocate(self)
def __str__(self) -> str:
return f"allocate {len(self.allocate_on_entry)} to stack"
@attrs.frozen(eq=False)
class CallSub(Op):
target: str
parameters: int
returns: int
consumes: int = attrs.field(init=False)
produces: Sequence[str] = attrs.field()
@consumes.default
def _consumes(self) -> int:
return self.parameters
@produces.validator
def _validate_produces(self, _: object, value: Sequence[str]) -> None:
assert len(value) == self.returns, "invalid produces size"
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_callsub(self)
def __str__(self) -> str:
return f"callsub {self.target}"
@attrs.frozen(eq=False)
class IntrinsicOp(Op):
"""An Op that does something other than just manipulating memory"""
# TODO: use enum values for these ops
op_code: str
immediates: Sequence[str | int] = attrs.field(default=(), converter=tuple[str | int, ...])
def __attrs_post_init__(self) -> None:
if self.op_code in ("b", "bz", "bnz", "switch", "match", "retsub", "err", "return"):
raise InternalError(
f"Branching op {self.op_code} should map to explicit MIR ControlOp",
self.source_location,
)
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_intrinsic(self)
def __str__(self) -> str:
result = [self.op_code, *map(str, self.immediates)]
if self.error_message:
result.append("//")
result.append(format_error_comment(self.op_code, self.error_message))
return " ".join(result)
@attrs.frozen(eq=False)
class ControlOp(BaseOp, abc.ABC):
@abc.abstractmethod
def targets(self) -> Sequence[str]: ...
@abc.abstractmethod
def _str_components(self) -> tuple[str, ...]: ...
@typing.final
def __str__(self) -> str:
result = tuple(self._str_components())
if self.error_message:
result += ("//", self.error_message)
return " ".join(result)
@attrs.frozen(eq=False)
class RetSub(ControlOp):
returns: int
fx_height: int = 0
# l-stack is discarded after this op
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
@typing.override
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_retsub(self)
@typing.override
def targets(self) -> Sequence[str]:
return ()
@typing.override
def _str_components(self) -> tuple[str, ...]:
return ("retsub",)
@attrs.frozen(eq=False)
class ProgramExit(ControlOp):
consumes: int = attrs.field(default=1, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
@typing.override
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_program_exit(self)
@typing.override
def targets(self) -> Sequence[str]:
return ()
@typing.override
def _str_components(self) -> tuple[str, ...]:
return ("return",)
@attrs.frozen(eq=False)
class Err(ControlOp):
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
@typing.override
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_err(self)
@typing.override
def targets(self) -> Sequence[str]:
return ()
@typing.override
def _str_components(self) -> tuple[str, ...]:
return ("err",)
@attrs.frozen(eq=False)
class Goto(ControlOp):
consumes: int = attrs.field(default=0, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
target: str
@typing.override
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_goto(self)
@typing.override
def targets(self) -> Sequence[str]:
return (self.target,)
@typing.override
def _str_components(self) -> tuple[str, ...]:
return "b", self.target
@attrs.frozen(eq=False)
class ConditionalBranch(ControlOp):
consumes: int = attrs.field(default=1, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
zero_target: str
nonzero_target: str
@typing.override
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_conditional_branch(self)
@typing.override
def targets(self) -> Sequence[str]:
return self.zero_target, self.nonzero_target
@typing.override
def _str_components(self) -> tuple[str, ...]:
return "bz", self.zero_target, ";", "b", self.nonzero_target
@attrs.frozen(eq=False)
class Switch(ControlOp):
consumes: int = attrs.field(default=1, init=False)
produces: Sequence[str] = attrs.field(default=(), init=False)
switch_targets: Sequence[str] = attrs.field(converter=tuple[str, ...])
default_target: str
@typing.override
def targets(self) -> Sequence[str]:
return *self.switch_targets, self.default_target
@typing.override
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_switch(self)
@typing.override
def _str_components(self) -> tuple[str, ...]:
return "switch", *self.switch_targets, ";", "b", self.default_target
@attrs.frozen(eq=False)
class Match(ControlOp):
produces: Sequence[str] = attrs.field(default=(), init=False)
match_targets: Sequence[str] = attrs.field(converter=tuple[str, ...])
consumes: int = attrs.field(init=False)
default_target: str
@consumes.default
def _consumes(self) -> int:
return len(self.match_targets) + 1
@typing.override
def targets(self) -> Sequence[str]:
return *self.match_targets, self.default_target
@typing.override
def accept(self, visitor: MIRVisitor[_T]) -> _T:
return visitor.visit_match(self)
@typing.override
def _str_components(self) -> tuple[str, ...]:
return "match", *self.match_targets, ";", "b", self.default_target
@attrs.define(eq=False, repr=False, kw_only=True)
class MemoryBasicBlock:
id: int
block_name: str
mem_ops: list[Op]
terminator: ControlOp
predecessors: list[str]
source_location: SourceLocation
    # the ordering of values on the stack is used by debug maps
    # the assumption is that lower levels won't change the order of variables on the stack,
    # however they can introduce changes that achieve that ordering more efficiently
x_stack_in: Sequence[str] | None = None
"""local_ids on x-stack on entry to a block"""
x_stack_out: Sequence[str] | None = None
"""local_ids on x-stack on exit from a block"""
f_stack_in: Sequence[str] = attrs.field(factory=list)
"""local_ids on f-stack on entry to a block"""
f_stack_out: Sequence[str] = attrs.field(factory=list)
"""local_ids on f-stack on exit from a block"""
@property
def ops(self) -> Sequence[BaseOp]:
return *self.mem_ops, self.terminator
def __repr__(self) -> str:
return self.block_name
@property
def entry_stack_height(self) -> int:
return len(self.f_stack_in) + len(self.x_stack_in or ())
@property
def exit_stack_height(self) -> int:
return len(self.f_stack_out) + len(self.x_stack_out or ())
@property
def successors(self) -> Sequence[str]:
return self.terminator.targets()
@attrs.frozen(kw_only=True)
class Parameter:
name: str
local_id: str
atype: AVMType
@attrs.frozen(str=False)
class Signature:
name: str
parameters: Sequence[Parameter]
returns: Sequence[AVMType]
def __str__(self) -> str:
params = ", ".join(f"{p.name}: {p.atype.name}" for p in self.parameters)
returns = ", ".join(str(r.name) for r in self.returns)
return f"{self.name}({params}) -> {returns or 'void'}:"
@attrs.define
class MemorySubroutine:
"""A lower form of IR that is concerned with memory assignment (both stack and scratch)"""
id: str
is_main: bool
signature: Signature
body: Sequence[MemoryBasicBlock]
source_location: SourceLocation | None
@cached_property
def block_map(self) -> Mapping[str, MemoryBasicBlock]:
return {b.block_name: b for b in self.body}
def get_block(self, block_name: str) -> MemoryBasicBlock:
return self.block_map[block_name]
@attrs.define
class Program:
kind: ProgramKind
main: MemorySubroutine
subroutines: list[MemorySubroutine]
avm_version: int
@property
def all_subroutines(self) -> Iterator[MemorySubroutine]:
yield self.main
yield from self.subroutines
def produces_from_desc(desc: str, size: int) -> Sequence[str]:
desc = f"{{{desc}}}"
if size > 1:
produces = [f"{desc}.{n}" for n in range(size)]
elif size == 1:
produces = [desc]
else:
produces = []
return produces
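# Behaviour sketch of produces_from_desc, derived from the code above:
#   produces_from_desc("tmp", 2) -> ["{tmp}.0", "{tmp}.1"]
#   produces_from_desc("tmp", 1) -> ["{tmp}"]
#   produces_from_desc("tmp", 0) -> []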
|
algorandfoundation/puya
|
src/puya/mir/models.py
|
Python
|
NOASSERTION
| 20,948 |
import textwrap
import attrs
from puya import log
from puya.context import ArtifactCompileContext, CompileContext
from puya.mir import models
from puya.mir.aligned_writer import AlignedWriter
from puya.mir.stack import Stack
from puya.parse import SourceLocation
logger = log.get_logger(__name__)
def output_memory_ir(
ctx: ArtifactCompileContext, program: models.Program, *, qualifier: str
) -> None:
output_path = ctx.build_output_path(program.kind, qualifier, "mir")
if output_path is None:
return
writer = AlignedWriter()
writer.add_header("// Op")
writer.add_header("Stack (out)", 4)
for subroutine in program.all_subroutines:
writer.append_line(f"// {subroutine.signature}")
for block in subroutine.body:
stack = Stack.begin_block(subroutine, block)
last_location = None
writer.append(f"{block.block_name}:")
writer.append(stack.full_stack_desc)
writer.new_line()
with writer.indent():
for op in block.ops:
last_location = _output_src_comment(
ctx, writer, last_location, op.source_location
)
op_str = str(op)
op.accept(stack)
# some ops can be very long (generally due to labels)
# in those (rare?) cases bypass the column alignment
if len(op_str) > 80:
writer.append_line(
writer.current_indent + op_str + " " + stack.full_stack_desc
)
else:
writer.append(op_str)
writer.append(stack.full_stack_desc)
writer.new_line()
writer.new_line()
writer.new_line()
writer.new_line()
output_path.write_text("\n".join(writer.write()), "utf8")
def _output_src_comment(
ctx: CompileContext,
writer: AlignedWriter,
last_loc: SourceLocation | None,
op_loc: SourceLocation | None,
) -> SourceLocation | None:
if op_loc:
whole_lines_location = attrs.evolve(op_loc, column=None, end_column=None)
if whole_lines_location != last_loc:
last_loc = whole_lines_location
src = ctx.try_get_source(whole_lines_location)
if src is not None:
writer.append(f"// {whole_lines_location}")
writer.new_line()
lines = textwrap.dedent("\n".join(src)).splitlines()
for line in lines:
writer.append(f"// {line.rstrip()}")
writer.new_line()
return last_loc
|
algorandfoundation/puya
|
src/puya/mir/output.py
|
Python
|
NOASSERTION
| 2,717 |
import typing
from collections.abc import Sequence
import attrs
from puya.mir import models
from puya.mir.visitor import MIRVisitor
@attrs.define
class Stack(MIRVisitor[None]):
parameters: Sequence[str]
_f_stack: list[str]
"""f-stack holds variables above the current frame"""
_x_stack: list[str]
"""x-stack holds variable that are carried between blocks"""
_l_stack: list[str] = attrs.field(factory=list)
"""l-stack holds variables that are used within a block"""
@classmethod
def begin_block(
cls, subroutine: models.MemorySubroutine, block: models.MemoryBasicBlock
) -> typing.Self:
return cls(
parameters=[p.local_id for p in subroutine.signature.parameters],
f_stack=list(block.f_stack_in),
x_stack=list(block.x_stack_in or ()), # x-stack might not be assigned yet
)
@property
def f_stack(self) -> Sequence[str]:
return self._f_stack
@property
def x_stack(self) -> Sequence[str]:
return self._x_stack
@property
def l_stack(self) -> Sequence[str]:
return self._l_stack
@property
def xl_height(self) -> int:
return len(self._l_stack) + len(self._x_stack)
@property
def fxl_height(self) -> int:
return len(self._f_stack) + len(self._l_stack) + len(self._x_stack)
@property
def full_stack_desc(self) -> str:
stack_descs = []
if self.parameters:
stack_descs.append("(𝕡) " + ",".join(self.parameters)) # noqa: RUF001
if self._f_stack:
stack_descs.append("(𝕗) " + ",".join(self._f_stack)) # noqa: RUF001
if self._x_stack:
stack_descs.append("(𝕏) " + ",".join(self._x_stack)) # noqa: RUF001
stack_descs.append(",".join(self._l_stack))
return " | ".join(stack_descs)
def _get_f_stack_dig_bury(self, value: str) -> int:
return (
len(self._f_stack)
+ len(self._x_stack)
+ len(self._l_stack)
- self._f_stack.index(value)
- 1
)
def visit_int(self, const: models.Int) -> None:
self._apply_lstack_effects(const)
def visit_byte(self, const: models.Byte) -> None:
self._apply_lstack_effects(const)
def visit_undefined(self, const: models.Undefined) -> None:
self._apply_lstack_effects(const)
def visit_template_var(self, const: models.TemplateVar) -> None:
self._apply_lstack_effects(const)
def visit_address(self, const: models.Address) -> None:
self._apply_lstack_effects(const)
def visit_method(self, const: models.Method) -> None:
self._apply_lstack_effects(const)
def visit_comment(self, _: models.Comment) -> None:
pass
def visit_abstract_store(self, store: models.AbstractStore) -> None:
self._apply_lstack_effects(store)
def visit_abstract_load(self, load: models.AbstractLoad) -> None:
self._apply_lstack_effects(load)
def _store_f_stack(self, store: models.StoreFStack) -> None:
local_id = store.local_id
# must calculate bury offsets BEFORE modifying l-stack
assert local_id in self._f_stack, f"{local_id} not in f-stack"
bury = self._get_f_stack_dig_bury(local_id)
assert bury == store.depth, f"expected {bury=} == {store.depth=}"
self._apply_lstack_effects(store)
def _insert_f_stack(self, store: models.StoreFStack) -> None:
local_id = store.local_id
assert local_id not in self._f_stack, f"{local_id} already in f-stack"
# inserting something at the top of the f-stack
# is equivalent to inserting at the bottom of the x-stack
cover = len(self._x_stack) + len(self._l_stack) - 1
assert cover == store.depth, f"expected {cover=} == {store.depth=}"
self._f_stack.append(local_id)
self._apply_lstack_effects(store)
def visit_store_f_stack(self, store: models.StoreFStack) -> None:
assert self._l_stack, f"l-stack is empty, can not store {store.local_id} to f-stack"
if store.insert:
self._insert_f_stack(store)
else:
self._store_f_stack(store)
def visit_load_f_stack(self, load: models.LoadFStack) -> None:
local_id = load.local_id
assert local_id in self._f_stack, f"{local_id} not found in f-stack"
dig = self._get_f_stack_dig_bury(local_id)
assert dig == load.depth, f"expected {dig=} == {load.depth=}"
self._apply_lstack_effects(load)
def visit_store_x_stack(self, store: models.StoreXStack) -> None:
local_id = store.local_id
assert self._l_stack, f"l-stack too small to store {local_id} to x-stack"
cover = len(self._x_stack) + len(self._l_stack) - 1
assert cover == store.depth, f"expected {cover=} == {store.depth=}"
self._x_stack.insert(0, local_id)
self._apply_lstack_effects(store)
def visit_load_x_stack(self, load: models.LoadXStack) -> None:
local_id = load.local_id
assert local_id in self._x_stack, f"{local_id} not found in x-stack"
index = self._x_stack.index(local_id)
uncover = len(self._l_stack) + len(self._x_stack) - index - 1
assert uncover == load.depth, f"expected {uncover=} == {load.depth=}"
self._x_stack.pop(index)
self._apply_lstack_effects(load)
def visit_store_l_stack(self, store: models.StoreLStack) -> None:
cover = store.depth
local_id = store.local_id
assert cover < len(
self._l_stack
), f"l-stack too small to store (cover {cover}) {store.local_id} to l-stack"
index = len(self._l_stack) - cover - 1
self._l_stack.pop()
self._l_stack.insert(index, local_id)
self._apply_lstack_effects(store)
def visit_load_l_stack(self, load: models.LoadLStack) -> None:
local_id = load.local_id
uncover = load.depth
if uncover is None: # during l-stack construction depth is not fixed
index = self._l_stack.index(local_id)
else:
index = len(self._l_stack) - uncover - 1
if not load.copy:
self._l_stack.pop(index)
self._apply_lstack_effects(load)
def visit_load_param(self, load: models.LoadParam) -> None:
assert load.local_id in self.parameters, f"{load.local_id} is not a parameter"
self._apply_lstack_effects(load)
def visit_store_param(self, store: models.StoreParam) -> None:
assert store.local_id in self.parameters, f"{store.local_id} is not a parameter"
self._apply_lstack_effects(store)
def visit_allocate(self, allocate: models.Allocate) -> None:
self._f_stack.extend(allocate.allocate_on_entry)
def visit_pop(self, pop: models.Pop) -> None:
self._apply_lstack_effects(pop)
def visit_callsub(self, callsub: models.CallSub) -> None:
self._apply_lstack_effects(callsub)
def visit_retsub(self, retsub: models.RetSub) -> None:
assert len(self._l_stack) == retsub.returns, (
f"Inconsistent l-stack height for retsub,"
f" expected {retsub.returns}, actual: {len(self._l_stack)}"
)
# retsub moves return values down below the frame to the stack height before the sub was
# called and discards anything above.
# represent this in the virtual stack with a new stack state with only the current
# l-stack (i.e. discard all values in parameters, f-stack and x-stack)
self.parameters = []
self._f_stack = []
self._x_stack = []
def visit_conditional_branch(self, op: models.ConditionalBranch) -> None:
self._apply_lstack_effects(op)
def visit_err(self, op: models.Err) -> None:
self._apply_lstack_effects(op)
def visit_goto(self, op: models.Goto) -> None:
self._apply_lstack_effects(op)
def visit_match(self, op: models.Match) -> None:
self._apply_lstack_effects(op)
def visit_program_exit(self, op: models.ProgramExit) -> None:
self._apply_lstack_effects(op)
def visit_switch(self, op: models.Switch) -> None:
self._apply_lstack_effects(op)
def visit_intrinsic(self, intrinsic: models.IntrinsicOp) -> None:
self._apply_lstack_effects(intrinsic)
def _apply_lstack_effects(self, op: models.BaseOp) -> None:
assert len(self._l_stack) >= op.consumes, f"l-stack too small for {op}"
start = len(self._l_stack) - op.consumes
self._l_stack[start:] = op.produces
| algorandfoundation/puya | src/puya/mir/stack.py | Python | NOASSERTION | 8,580 |
from puya.mir import models
from puya.mir.context import ProgramMIRContext, SubroutineCodeGenContext
from puya.mir.output import output_memory_ir
from puya.mir.stack_allocation.f_stack import f_stack_allocation
from puya.mir.stack_allocation.l_stack import l_stack_allocation
from puya.mir.stack_allocation.peephole import peephole_optimization_single_pass
from puya.mir.stack_allocation.x_stack import x_stack_allocation
# Note: implementation of http://www.euroforth.org/ef06/shannon-bailey06.pdf
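# The passes below operate on three virtual stack regions:
#   l-stack - working values local to a single basic block
#   x-stack - values passed between blocks along control-flow edges
#   f-stack - values that live for the duration of the subroutine's frame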
def global_stack_allocation(ctx: ProgramMIRContext, program: models.Program) -> None:
for desc, method in {
"lstack": l_stack_allocation,
"lstack.opt": _peephole_optimization,
"xstack": x_stack_allocation,
"xstack.opt": _peephole_optimization,
"fstack": f_stack_allocation,
"fstack.opt": _peephole_optimization,
}.items():
for mir_sub in program.all_subroutines:
sub_ctx = ctx.for_subroutine(mir_sub)
method(sub_ctx)
if ctx.options.output_memory_ir:
output_memory_ir(ctx, program, qualifier=desc)
if ctx.options.output_memory_ir:
output_memory_ir(ctx, program, qualifier="")
def _peephole_optimization(ctx: SubroutineCodeGenContext) -> None:
# replace sequences of stack manipulations with shorter ones
vla_modified = False
for block in ctx.subroutine.body:
result = peephole_optimization_single_pass(ctx, block)
vla_modified = vla_modified or result.vla_modified
if vla_modified:
ctx.invalidate_vla()
| algorandfoundation/puya | src/puya/mir/stack_allocation/__init__.py | Python | NOASSERTION | 1,566 |
import typing
from collections.abc import Sequence
import attrs
from puya import log
from puya.avm import AVMType
from puya.errors import InternalError
from puya.mir import models as mir
from puya.mir.context import SubroutineCodeGenContext
from puya.mir.stack import Stack
from puya.utils import attrs_extend
logger = log.get_logger(__name__)
def _get_lazy_fstack(subroutine: mir.MemorySubroutine) -> dict[str, mir.AbstractStore]:
# TODO: consider more than the entry block
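    # "lazy" allocation means a variable is created on the f-stack by its first store
    # in the entry block, instead of being pre-allocated in the subroutine preamble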
entry = subroutine.body[0]
# if entry is re-entrant then can't lazy allocate anything
if entry.predecessors:
return {}
result = dict[str, mir.AbstractStore]()
for op in entry.ops:
if isinstance(op, mir.AbstractStore):
result.setdefault(op.local_id, op)
return result
def _get_local_id_types(subroutine: mir.MemorySubroutine) -> dict[str, AVMType]:
variable_mapping = dict[str, AVMType]()
for block in subroutine.body:
for op in block.ops:
if isinstance(op, mir.AbstractStore):
try:
existing_type = variable_mapping[op.local_id]
except KeyError:
existing_type = op.atype
variable_mapping[op.local_id] = existing_type | op.atype
return variable_mapping
def _get_allocate_op(
subroutine: mir.MemorySubroutine, all_variables: Sequence[str]
) -> mir.Allocate:
# determine variables to allocate at beginning of frame,
# and order them so bytes are listed first, followed by uints
byte_vars = []
uint64_vars = []
variable_type_mapping = _get_local_id_types(subroutine)
for variable in all_variables:
match variable_type_mapping.get(variable):
case AVMType.uint64:
uint64_vars.append(variable)
case AVMType.bytes:
byte_vars.append(variable)
case AVMType.any:
raise InternalError(
"Encountered AVM type any on preamble construction",
subroutine.source_location,
)
case None:
# shouldn't occur, undefined variables should still have an Undefined entry
# with a type
raise InternalError(f"Undefined register: {variable}", subroutine.source_location)
case unexpected:
typing.assert_never(unexpected)
return mir.Allocate(bytes_vars=byte_vars, uint64_vars=uint64_vars)
def f_stack_allocation(ctx: SubroutineCodeGenContext) -> None:
all_variables = ctx.vla.all_variables
if not all_variables:
return
subroutine = ctx.subroutine
first_store_ops = _get_lazy_fstack(subroutine)
allocate_on_first_store = list(first_store_ops)
unsorted_pre_allocate = [x for x in all_variables if x not in first_store_ops]
if unsorted_pre_allocate:
allocate = _get_allocate_op(subroutine, unsorted_pre_allocate)
allocate_at_entry = allocate.allocate_on_entry
subroutine.body[0].mem_ops.insert(0, allocate)
else:
allocate_at_entry = []
logger.debug(f"{subroutine.signature.name} f-stack entry: {allocate_at_entry}")
logger.debug(f"{subroutine.signature.name} f-stack on first store: {allocate_on_first_store}")
all_f_stack = [*allocate_at_entry, *first_store_ops.keys()]
subroutine.body[0].f_stack_out = all_f_stack
for block in subroutine.body[1:]:
block.f_stack_in = block.f_stack_out = all_f_stack
removed_virtual = False
for block in subroutine.body:
stack = Stack.begin_block(subroutine, block)
for index, op in enumerate(block.mem_ops):
match op:
case mir.AbstractStore() as store:
insert = op in first_store_ops.values()
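                    # an "insert" store appends a new slot to the top of the f-stack
                    # (covering past the current x+l stack), otherwise the value is
                    # buried into the variable's existing frame slot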
if insert:
depth = stack.xl_height - 1
else:
depth = stack.fxl_height - stack.f_stack.index(store.local_id) - 1
block.mem_ops[index] = op = attrs_extend(
mir.StoreFStack,
store,
depth=depth,
frame_index=stack.fxl_height - depth - 1,
insert=insert,
)
removed_virtual = True
case mir.AbstractLoad() as load:
depth = stack.fxl_height - stack.f_stack.index(load.local_id) - 1
block.mem_ops[index] = op = attrs_extend(
mir.LoadFStack,
load,
depth=depth,
frame_index=stack.fxl_height - depth - 1,
)
removed_virtual = True
op.accept(stack)
match block.terminator:
case mir.RetSub() as retsub:
block.terminator = attrs.evolve(
retsub, fx_height=len(stack.f_stack) + len(stack.x_stack)
)
if removed_virtual:
ctx.invalidate_vla()
| algorandfoundation/puya | src/puya/mir/stack_allocation/f_stack.py | Python | NOASSERTION | 5,074 |
import itertools
import attrs
from puya import log
from puya.mir import models as mir
from puya.mir.context import SubroutineCodeGenContext
from puya.mir.stack import Stack
logger = log.get_logger(__name__)
@attrs.define
class UsagePair:
a: mir.AbstractLoad | mir.AbstractStore
b: mir.AbstractLoad
a_index: int
b_index: int
@staticmethod
def by_distance(pair: "UsagePair") -> tuple[int, int, int]:
return pair.b_index - pair.a_index, pair.a_index, pair.b_index
def l_stack_allocation(ctx: SubroutineCodeGenContext) -> None:
    # the following is basically Koopman's algorithm
# done as part of http://www.euroforth.org/ef06/shannon-bailey06.pdf
# see also https://users.ece.cmu.edu/~koopman/stack_compiler/stack_co.html#appendix
for block in ctx.subroutine.body:
usage_pairs = _find_usage_pairs(block)
_copy_usage_pairs(ctx, block, usage_pairs)
for block in ctx.subroutine.body:
_dead_store_removal(ctx, block)
if ctx.options.optimization_level:
_implicit_store_removal(block)
# update vla after dead store removal
ctx.invalidate_vla()
# calculate load depths now that l-stack allocations are done
for block in ctx.subroutine.body:
_calculate_load_depths(ctx, block)
def _find_usage_pairs(block: mir.MemoryBasicBlock) -> list[UsagePair]:
# find usage pairs of variables within the block
# the first element of the pair is an op that defines or uses a variable
# the second element of the pair is an op that uses the variable
# return pairs in ascending order, based on the number of instruction between each pair
variables = dict[str, list[tuple[int, mir.AbstractStore | mir.AbstractLoad]]]()
for index, op in enumerate(block.ops):
match op:
case mir.AbstractStore(local_id=local_id) | mir.AbstractLoad(local_id=local_id):
variables.setdefault(local_id, []).append((index, op))
pairs = list[UsagePair]()
for uses in variables.values():
# pairwise iteration means an op can only be in at most 2 pairs
for (a_index, a), (b_index, b) in itertools.pairwise(uses):
if isinstance(b, mir.AbstractStore):
continue # skip redefines, if they are used they will be picked up in next pair
pairs.append(UsagePair(a=a, b=b, a_index=a_index, b_index=b_index))
return sorted(pairs, key=UsagePair.by_distance)
def _copy_usage_pairs(
ctx: SubroutineCodeGenContext, block: mir.MemoryBasicBlock, pairs: list[UsagePair]
) -> None:
# 1. copy define or use to bottom of l-stack
# 2. replace usage with instruction to rotate the value from the bottom of l-stack to the top
# to copy: dup, cover {stack_height}
# to rotate: uncover {stack_height} - 1
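    # e.g. a (store x, load x) pair within a block roughly becomes:
    #   <x on top of l-stack>
    #   StoreLStack(x, copy=True)    # dup + cover: keep a copy for the original consumer
    #   store x                      # original virtual store (may later prove to be dead)
    #   ...
    #   LoadLStack(x, copy=False)    # uncover: rotate x back to the top for this use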
replaced_ops = dict[mir.StoreOp | mir.LoadOp, mir.LoadOp]()
for pair in pairs:
# note: pairs may refer to ops that have been replaced by an earlier iteration
a = replaced_ops.get(pair.a, pair.a)
b = replaced_ops.get(pair.b, pair.b)
local_id = a.local_id
# step 1. copy define or use to bottom of stack
# redetermine index as block ops may have changed
a_index = block.mem_ops.index(a)
# insert replacement before store, or after load
insert_index = a_index if isinstance(a, mir.AbstractStore) else a_index + 1
stack = Stack.begin_block(ctx.subroutine, block)
for op in block.mem_ops[:insert_index]:
op.accept(stack)
dup = mir.StoreLStack(
depth=len(stack.l_stack) - 1,
local_id=local_id,
# leave a copy for the original consumer of this value which is either:
# a.) the virtual store we are inserting before
# b.) whatever came after the virtual load we are inserting after
# The copy will be eliminated during dead store removal if no longer required
copy=True,
source_location=a.source_location,
atype=a.atype,
)
block.mem_ops.insert(insert_index, dup)
logger.debug(f"Inserted {block.block_name}.ops[{insert_index}]: '{dup}'")
# step 2. replace b usage with instruction to rotate the value from the bottom of the stack
# determine index of b, as inserts may have shifted its location
b_index = block.mem_ops.index(b)
uncover = mir.LoadLStack(
# can not determine depth yet
# as it depends on any other l-stack operations between the store and this load
# which could change until after dead store removal is complete
depth=None,
local_id=local_id,
copy=False,
source_location=b.source_location,
atype=b.atype,
)
# replace op
block.mem_ops[b_index] = uncover
# remember replacement in case it is part of another pair
# an op can only be at most in 2 pairs, so don't need to do this recursively
replaced_ops[b] = uncover
logger.debug(f"Replaced {block.block_name}.ops[{b_index}]: '{b}' with '{uncover}'")
def _dead_store_removal(ctx: SubroutineCodeGenContext, block: mir.MemoryBasicBlock) -> None:
ops = block.mem_ops
op_idx = 0
while op_idx < len(ops) - 1:
window = slice(op_idx, op_idx + 2)
a, b = ops[window]
if (
isinstance(a, mir.StoreLStack)
and a.copy
and isinstance(b, mir.AbstractStore)
and b.local_id not in ctx.vla.get_live_out_variables(b)
):
# StoreLStack is used to:
# 1.) create copy of the value to be immediately stored via virtual store
# 2.) rotate the value to the bottom of the stack for use in a later op in this block
# If it is a dead store, then the 1st scenario is no longer needed
# and instead just need to ensure the value is moved to the bottom of the stack
a = attrs.evolve(a, copy=False, produces=())
ops[window] = [a]
elif (
isinstance(a, mir.LoadLStack)
and not a.copy
and isinstance(b, mir.StoreLStack)
and b.copy
and a.local_id == b.local_id
):
a = attrs.evolve(
a,
copy=True,
produces=(f"{a.local_id} (copy)",),
)
ops[window] = [a]
op_idx += 1
def _implicit_store_removal(block: mir.MemoryBasicBlock) -> None:
ops = block.mem_ops
op_idx = 0
while op_idx < len(ops):
op = ops[op_idx]
# see if ops immediately after this op are all storing to the l-stack what this op produces
next_op_idx = op_idx + 1
maybe_remove_window = slice(next_op_idx, next_op_idx + len(op.produces))
maybe_remove = [
maybe_store
for maybe_store in ops[maybe_remove_window]
if isinstance(maybe_store, mir.StoreLStack)
and not maybe_store.copy
and maybe_store.local_id in op.produces
]
# if they all match then this means all values are implicitly on the l-stack
# and we can safely remove the store ops
if len(maybe_remove) == len(op.produces):
ops[maybe_remove_window] = []
op_idx = next_op_idx
def _calculate_load_depths(ctx: SubroutineCodeGenContext, block: mir.MemoryBasicBlock) -> None:
stack = Stack.begin_block(ctx.subroutine, block)
for idx, op in enumerate(block.mem_ops):
if isinstance(op, mir.LoadLStack):
local_id_index = stack.l_stack.index(op.local_id)
block.mem_ops[idx] = attrs.evolve(op, depth=len(stack.l_stack) - local_id_index - 1)
op.accept(stack)
| algorandfoundation/puya | src/puya/mir/stack_allocation/l_stack.py | Python | NOASSERTION | 7,791 |
from collections.abc import Sequence
import attrs
from puya.mir import models as mir
from puya.mir.context import SubroutineCodeGenContext
def optimize_pair(
ctx: SubroutineCodeGenContext,
a: mir.Op,
b: mir.Op,
) -> Sequence[mir.Op] | None:
"""Given a pair of ops, returns which ops should be kept including replacements"""
# this function has been optimized to reduce the number of isinstance checks,
# consider this when making any modifications
# move local_ids to produces of previous op where possible
if (
isinstance(b, mir.StoreLStack | mir.StoreXStack | mir.StoreFStack)
and len(a.produces)
and a.produces[-1] != b.local_id
):
a = attrs.evolve(a, produces=(*a.produces[:-1], b.local_id))
return a, b
# remove redundant stores and loads
if a.produces and a.produces[-1] == _get_local_id_alias(b):
return (a,)
if isinstance(b, mir.AbstractStore) and b.local_id not in ctx.vla.get_live_out_variables(b):
# note l-stack dead store removal occurs during l-stack allocation
# this handles any other cases
return a, mir.Pop(n=1, source_location=b.source_location)
if isinstance(a, mir.LoadOp) and isinstance(b, mir.StoreOp) and a.local_id == b.local_id:
match a, b:
case mir.LoadXStack(), mir.StoreXStack():
return ()
case mir.LoadFStack(), mir.StoreFStack():
return ()
case mir.AbstractLoad(), mir.AbstractStore():
                # this is needed, see test_cases/bug_load_store_load_store
return ()
return None
@attrs.define(kw_only=True)
class PeepholeResult:
modified: bool
vla_modified: bool
def peephole_optimization_single_pass(
ctx: SubroutineCodeGenContext, block: mir.MemoryBasicBlock
) -> PeepholeResult:
result = block.mem_ops
op_idx = 0
modified = False
vla_modified = False
while op_idx < len(result) - 1:
window = slice(op_idx, op_idx + 2)
curr_op, next_op = result[window]
pair_result = optimize_pair(ctx, curr_op, next_op)
if pair_result is not None:
modified = True
result[window] = pair_result
# check if VLA needs updating
vla_modified = (
vla_modified
or (
curr_op not in pair_result
and isinstance(curr_op, mir.AbstractStore | mir.AbstractLoad)
)
or (
next_op not in pair_result
and isinstance(next_op, mir.AbstractStore | mir.AbstractLoad)
)
)
else: # if nothing optimized, then advance
op_idx += 1
return PeepholeResult(modified=modified, vla_modified=vla_modified)
def _get_local_id_alias(op: mir.BaseOp) -> str | None:
"""Returns the local_id of a memory op if it has no effect
apart from renaming the top variable on the stack"""
if isinstance(op, mir.StoreLStack | mir.LoadLStack) and not op.copy and not op.depth:
return op.local_id
# TODO: the following can only be done if the movement between l-stack and the other stack
# is captured somehow (also check assumption that it needs to be captured...)
# if isinstance(op, mir.StoreXStack | mir.LoadXStack) and not op.depth:
# return op.local_id
# if isinstance(op, mir.StoreFStack) and op.insert and not op.depth:
# return op.local_id
return None
| algorandfoundation/puya | src/puya/mir/stack_allocation/peephole.py | Python | NOASSERTION | 3,527 |
import itertools
from collections.abc import Iterable, Sequence, Set
import attrs
from puya import log
from puya.errors import InternalError
from puya.mir import models as mir
from puya.mir.context import SubroutineCodeGenContext
from puya.mir.stack import Stack
logger = log.get_logger(__name__)
@attrs.define(eq=False, repr=False)
class BlockRecord:
block: mir.MemoryBasicBlock
local_references: list[mir.AbstractStore | mir.AbstractLoad]
live_in: Set[str]
live_out: Set[str]
children: "list[BlockRecord]" = attrs.field(factory=list)
parents: "list[BlockRecord]" = attrs.field(factory=list)
co_parents: "list[BlockRecord]" = attrs.field(factory=list)
siblings: "list[BlockRecord]" = attrs.field(factory=list)
x_stack_in: Sequence[str] | None = None
x_stack_out: Sequence[str] | None = None
def __repr__(self) -> str:
# due to recursive nature of BlockRecord, provide str implementation to
# simplify output
return f"BlockRecord({self.block})"
@staticmethod
def by_index(block: "BlockRecord") -> int:
return block.block.id
@attrs.frozen
class EdgeSet:
out_blocks: Sequence[BlockRecord] = attrs.field(converter=tuple[BlockRecord, ...])
in_blocks: Sequence[BlockRecord] = attrs.field(converter=tuple[BlockRecord, ...])
def sort_by_appearance(
variables: Set[str], block: mir.MemoryBasicBlock, *, load: bool = True
) -> Sequence[str]:
appearance = list[str]()
block_ops = block.ops if load else reversed(block.ops)
if load:
virtual_ops = (o.local_id for o in block_ops if isinstance(o, mir.AbstractLoad))
else:
virtual_ops = (o.local_id for o in block_ops if isinstance(o, mir.AbstractStore))
for local_id in virtual_ops:
if local_id in variables and local_id not in appearance:
appearance.append(local_id)
# don't keep searching once we are done
if len(appearance) == len(variables):
break
return appearance
def len_and_value(value: tuple[str, ...]) -> tuple[int, tuple[str, ...]]:
return len(value), value
def find_shared_x_stack(x_stack_candidates: Sequence[Sequence[str]]) -> Sequence[str]:
"""Find a common subsequence that is shared by all x-stacks"""
cache = dict[tuple[tuple[str, ...], tuple[str, ...]], tuple[str, ...]]()
def lcs(s1: tuple[str, ...], s2: tuple[str, ...]) -> tuple[str, ...]:
key = (s1, s2)
result = cache.get(key)
if result is None:
i = len(s1)
j = len(s2)
if i == 0 or j == 0:
result = ()
elif s1[-1] == s2[-1]:
result = (*lcs(s1[:-1], s2[:-1]), s1[-1])
else:
result = max(lcs(s1[:-1], s2), lcs(s1, s2[:-1]), key=len_and_value)
cache[key] = result
return result
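    # lcs is a memoized longest-common-subsequence, e.g. lcs(("a", "b", "c"), ("b", "c", "d"))
    # returns ("b", "c"); ties on length are broken deterministically via len_and_value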
shared, *others = sorted({tuple(s) for s in x_stack_candidates}, key=len_and_value)
for other in others:
shared = lcs(shared, other)
return shared
def get_x_stack_load_ops(record: BlockRecord) -> set[mir.AbstractLoad]:
block = record.block
assert block.x_stack_in is not None
remaining = set(block.x_stack_in)
load_ops = []
for ref in record.local_references:
if isinstance(ref, mir.AbstractLoad) and ref.local_id in remaining:
remaining.remove(ref.local_id)
load_ops.append(ref)
if remaining:
raise InternalError(
f"Failed to move {', '.join(remaining)} from the x-stack",
location=block.source_location,
)
return set(load_ops)
def get_x_stack_store_ops(record: BlockRecord) -> set[mir.AbstractStore]:
block = record.block
assert block.x_stack_out is not None
remaining = set(block.x_stack_out)
store_ops = []
for ref in reversed(record.local_references):
if isinstance(ref, mir.AbstractStore) and ref.local_id in remaining:
remaining.remove(ref.local_id)
store_ops.append(ref)
if remaining:
raise InternalError(
f"Failed to copy {', '.join(remaining)} to the x-stack",
location=block.source_location,
)
return set(store_ops)
def add_x_stack_ops(ctx: SubroutineCodeGenContext, record: BlockRecord) -> None:
block = record.block
# determine ops to replace
load_ops = get_x_stack_load_ops(record)
store_ops = get_x_stack_store_ops(record)
stack = Stack.begin_block(ctx.subroutine, block)
for index, op in enumerate(block.mem_ops):
if op in store_ops:
assert isinstance(op, mir.AbstractStore)
# can replace virtual store op because only variables that could be fully
# scheduled are on the x-stack
block.mem_ops[index] = op = mir.StoreXStack(
local_id=op.local_id,
depth=stack.xl_height - 1, # store to bottom
atype=op.atype,
source_location=op.source_location,
)
elif op in load_ops:
assert isinstance(op, mir.AbstractLoad)
block.mem_ops[index] = op = mir.LoadXStack(
local_id=op.local_id,
depth=stack.xl_height - stack.x_stack.index(op.local_id) - 1,
atype=op.atype,
source_location=op.source_location,
)
op.accept(stack)
def add_x_stack_ops_to_edge_sets(
ctx: SubroutineCodeGenContext, edge_sets: Sequence[EdgeSet]
) -> None:
records = dict.fromkeys(
b
for edge_set in edge_sets
for b in itertools.chain(edge_set.out_blocks, edge_set.in_blocks)
if b.x_stack_in or b.x_stack_out
)
for record in records:
assert record.x_stack_in is not None
assert record.x_stack_out is not None
record.block.x_stack_in = record.x_stack_in
record.block.x_stack_out = record.x_stack_out
add_x_stack_ops(ctx, record)
def _unique_ordered_blocks(blocks: Iterable[BlockRecord]) -> list[BlockRecord]:
return sorted(set(blocks), key=BlockRecord.by_index)
def get_edge_set(block: BlockRecord) -> EdgeSet | None:
out_blocks = _unique_ordered_blocks(itertools.chain((block,), block.co_parents))
# keep expanding out_blocks (and consequently in_blocks) until out_blocks stabilize
while True:
in_blocks = _unique_ordered_blocks(s for p in out_blocks for s in p.children)
new_out_blocks = _unique_ordered_blocks(p for s in in_blocks for p in s.parents)
if new_out_blocks == out_blocks:
break
out_blocks = new_out_blocks
return EdgeSet(out_blocks, in_blocks) if in_blocks else None
def get_edge_sets(ctx: SubroutineCodeGenContext) -> Sequence[EdgeSet]:
subroutine = ctx.subroutine
vla = ctx.vla
records = {
block: BlockRecord(
block=block,
local_references=[
op for op in block.ops if isinstance(op, mir.AbstractStore | mir.AbstractLoad)
],
live_in=vla.get_live_in_variables(block.ops[0]),
live_out=vla.get_live_out_variables(block.ops[-1]),
)
for block in subroutine.body
}
# given blocks 1,2,3,4,5,6 and 7
# edges: 1->5, 2->4, 2->5, 2->6, 3->5, 7->6, 7->8
#
# e.g 1 2 3 7
# \/|\/ / \
# / \|/ \ / \
# 4 5 6 8
#
# consider 2
# 4, 5 & 6 are children
# 1 & 3 are co-parents of 5, 7 is a co-parent of 6
# 1, 2, 3 and 7 form the out_blocks of an edge set
# 4, 5 & 6 are the in_blocks of the same edge set
# 1. first pass
# populate children and parents
blocks = [records[b] for b in subroutine.body]
for block in blocks:
block.children = [records[subroutine.get_block(c)] for c in block.block.successors]
for child in block.children:
child.parents.append(block)
# 2. second pass - boundary mapping
for block in blocks:
# determine siblings
for parent in block.parents:
for child in parent.children:
if child is not block and child not in block.siblings:
block.siblings.append(child)
# determine co-parents
for child in block.children:
for parent in child.parents:
if parent is not block and parent not in block.co_parents:
block.co_parents.append(parent)
edge_sets = dict[EdgeSet, None]()
for block in blocks:
# keep expanding edge set until it stabilizes
edge_set = get_edge_set(block)
if edge_set:
edge_sets[edge_set] = None
else:
block.x_stack_out = ()
if not block.parents:
block.x_stack_in = ()
return list(edge_sets.keys())
def schedule_sets(ctx: SubroutineCodeGenContext, edge_sets: Sequence[EdgeSet]) -> None:
# determine all blocks referencing variables, so we can track if all references to a
# variable are scheduled to x-stack
stores = dict[str, set[mir.MemoryBasicBlock]]()
loads = dict[str, set[mir.MemoryBasicBlock]]()
vla = ctx.vla
for variable in vla.all_variables:
stores[variable] = vla.get_store_blocks(variable)
loads[variable] = vla.get_load_blocks(variable)
for edge_set in edge_sets:
in_blocks = edge_set.in_blocks
out_blocks = edge_set.out_blocks
# get potential l-stacks (unordered)
l_stacks = [
*(b.live_out for b in out_blocks),
*(b.live_in for b in in_blocks),
]
# determine shared l-stack variables for this edge_set
# determine all valid x-stacks (ordered)
first, *others = l_stacks
common_locals = frozenset(first).intersection(*others)
# TODO: better results might be possible if we allow reordering of x-stack
x_stack_candidates = [
*(sort_by_appearance(common_locals, b.block, load=False) for b in out_blocks),
*(sort_by_appearance(common_locals, b.block, load=True) for b in in_blocks),
]
# find an x_stack for this EdgeSet
x_stack = find_shared_x_stack(x_stack_candidates)
for block in out_blocks:
assert block.x_stack_out is None
block.x_stack_out = x_stack
for x in x_stack:
stores[x].remove(block.block)
for block in in_blocks:
assert block.x_stack_in is None
block.x_stack_in = x_stack
for x in x_stack:
loads[x].remove(block.block)
# adjust final x-stacks based on what could be fully scheduled
variables_not_fully_scheduled = {
var for var, blocks in itertools.chain(stores.items(), loads.items()) if len(blocks) > 0
}
variables_successfully_scheduled = sorted(stores.keys() - variables_not_fully_scheduled)
for block in {b for es in edge_sets for b in itertools.chain((*es.out_blocks, *es.in_blocks))}:
assert block.x_stack_out is not None
assert block.x_stack_in is not None
block.x_stack_out = tuple(
x for x in block.x_stack_out if x in variables_successfully_scheduled
)
block.x_stack_in = tuple(
x for x in block.x_stack_in if x in variables_successfully_scheduled
)
if variables_successfully_scheduled:
ctx.invalidate_vla()
logger.debug(
f"Allocated {len(variables_successfully_scheduled)} "
f"variable/s to x-stack: {', '.join(variables_successfully_scheduled)}"
)
def validate_pair(parent: BlockRecord, child: BlockRecord) -> bool:
parent_x = parent.x_stack_out
child_x = child.x_stack_in
assert parent_x is not None
assert child_x is not None
if parent_x != child_x:
logger.error(
f"x-stacks do not match for {parent.block} -> {child.block}: "
f"{', '.join(parent_x)} -> {', '.join(child_x)}"
)
return False
if parent_x:
logger.debug(f"shared x-stack for {parent.block} -> {child.block}: {', '.join(parent_x)}")
return True
def validate_x_stacks(edge_sets: Sequence[EdgeSet]) -> bool:
ok = True
for edge_set in edge_sets:
for parent in edge_set.out_blocks:
for child in edge_set.in_blocks:
ok = validate_pair(parent, child) and ok
return ok
def x_stack_allocation(ctx: SubroutineCodeGenContext) -> None:
    # this is basically Bailey's algorithm
edge_sets = get_edge_sets(ctx)
if not edge_sets:
# nothing to do
return
logger.debug(f"Found {len(edge_sets)} edge set/s for {ctx.subroutine.signature.name}")
schedule_sets(ctx, edge_sets)
if not validate_x_stacks(edge_sets):
raise InternalError("Could not schedule x-stack")
add_x_stack_ops_to_edge_sets(ctx, edge_sets)
| algorandfoundation/puya | src/puya/mir/stack_allocation/x_stack.py | Python | NOASSERTION | 12,878 |
from __future__ import annotations
import abc
import typing
if typing.TYPE_CHECKING:
from puya.mir import models
class MIRVisitor[T](abc.ABC):
@abc.abstractmethod
def visit_int(self, push: models.Int) -> T: ...
@abc.abstractmethod
def visit_byte(self, push: models.Byte) -> T: ...
@abc.abstractmethod
def visit_undefined(self, push: models.Undefined) -> T: ...
@abc.abstractmethod
def visit_comment(self, comment: models.Comment) -> T: ...
@abc.abstractmethod
def visit_store_l_stack(self, store: models.StoreLStack) -> T: ...
@abc.abstractmethod
def visit_load_l_stack(self, load: models.LoadLStack) -> T: ...
@abc.abstractmethod
def visit_store_x_stack(self, store: models.StoreXStack) -> T: ...
@abc.abstractmethod
def visit_load_x_stack(self, load: models.LoadXStack) -> T: ...
@abc.abstractmethod
def visit_store_f_stack(self, store: models.StoreFStack) -> T: ...
@abc.abstractmethod
def visit_load_f_stack(self, load: models.LoadFStack) -> T: ...
@abc.abstractmethod
def visit_load_param(self, load: models.LoadParam) -> T: ...
@abc.abstractmethod
def visit_store_param(self, load: models.StoreParam) -> T: ...
@abc.abstractmethod
def visit_abstract_store(self, store: models.AbstractStore) -> T: ...
@abc.abstractmethod
def visit_abstract_load(self, load: models.AbstractLoad) -> T: ...
@abc.abstractmethod
def visit_allocate(self, allocate: models.Allocate) -> T: ...
@abc.abstractmethod
def visit_pop(self, pop: models.Pop) -> T: ...
@abc.abstractmethod
def visit_callsub(self, callsub: models.CallSub) -> T: ...
@abc.abstractmethod
def visit_intrinsic(self, intrinsic: models.IntrinsicOp) -> T: ...
@abc.abstractmethod
def visit_retsub(self, retsub: models.RetSub) -> T: ...
@abc.abstractmethod
def visit_program_exit(self, op: models.ProgramExit) -> T: ...
@abc.abstractmethod
def visit_err(self, op: models.Err) -> T: ...
@abc.abstractmethod
def visit_goto(self, op: models.Goto) -> T: ...
@abc.abstractmethod
def visit_conditional_branch(self, op: models.ConditionalBranch) -> T: ...
@abc.abstractmethod
def visit_switch(self, op: models.Switch) -> T: ...
@abc.abstractmethod
def visit_match(self, op: models.Match) -> T: ...
@abc.abstractmethod
def visit_address(self, addr: models.Address) -> T: ...
@abc.abstractmethod
def visit_method(self, method: models.Method) -> T: ...
@abc.abstractmethod
def visit_template_var(self, deploy_var: models.TemplateVar) -> T: ...
| algorandfoundation/puya | src/puya/mir/visitor.py | Python | NOASSERTION | 2,641 |
import itertools
import typing
from collections.abc import Sequence, Set
from functools import cached_property
import attrs
from puya.mir import models
from puya.utils import StableSet
@attrs.define(kw_only=True)
class _OpLifetime:
block: models.MemoryBasicBlock
used: StableSet[str] = attrs.field(on_setattr=attrs.setters.frozen)
defined: StableSet[str] = attrs.field(on_setattr=attrs.setters.frozen)
successors: list[typing.Self] = attrs.field(factory=list)
predecessors: list[typing.Self] = attrs.field(factory=list)
live_in: StableSet[str] = attrs.field(factory=StableSet)
live_out: StableSet[str] = attrs.field(factory=StableSet)
@attrs.define
class VariableLifetimeAnalysis:
"""Performs VLA analysis for a subroutine, providing a mapping of ops to sets of live local_ids
see https://www.classes.cs.uchicago.edu/archive/2004/spring/22620-1/docs/liveness.pdf"""
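    # e.g. for ops [store x, store y, load x, load y] (with nothing live afterwards),
    # the load of x has live_in = {x, y} and live_out = {y},
    # while the store of y has live_in = {x} and live_out = {x, y}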
subroutine: models.MemorySubroutine
_op_lifetimes: dict[models.BaseOp, _OpLifetime] = attrs.field(init=False)
@cached_property
def all_variables(self) -> Sequence[str]:
return sorted(
{v for live in self._op_lifetimes.values() for v in (*live.defined, *live.used)}
)
@_op_lifetimes.default
def _op_lifetimes_factory(self) -> dict[models.BaseOp, _OpLifetime]:
result = dict[models.BaseOp, _OpLifetime]()
block_map = {b.block_name: b.ops[0] for b in self.subroutine.body}
for block in self.subroutine.body:
for op in block.ops:
used = StableSet[str]()
defined = StableSet[str]()
if isinstance(op, models.AbstractStore):
defined.add(op.local_id)
elif isinstance(op, models.AbstractLoad):
used.add(op.local_id)
result[op] = _OpLifetime(
block=block,
used=used,
defined=defined,
)
for block in self.subroutine.body:
for op, next_op in itertools.zip_longest(block.ops, block.ops[1:]):
if isinstance(op, models.ControlOp):
assert next_op is None
# note: control ops that end the current subroutine don't have any logical
# successors
successors = [result[block_map[s]] for s in op.targets()]
else:
assert next_op is not None
successors = [result[next_op]]
op_lifetime = result[op]
op_lifetime.successors = successors
for s in successors:
s.predecessors.append(op_lifetime)
return result
def get_live_out_variables(self, op: models.BaseOp) -> Set[str]:
return self._op_lifetimes[op].live_out
def get_live_in_variables(self, op: models.BaseOp) -> Set[str]:
return self._op_lifetimes[op].live_in
def get_store_blocks(self, variable: str) -> set[models.MemoryBasicBlock]:
return {op.block for op in self._op_lifetimes.values() if variable in op.defined}
def get_load_blocks(self, variable: str) -> set[models.MemoryBasicBlock]:
return {op.block for op in self._op_lifetimes.values() if variable in op.used}
@classmethod
def analyze(cls, subroutine: models.MemorySubroutine) -> typing.Self:
analysis = cls(subroutine)
analysis._analyze() # noqa: SLF001
return analysis
def _analyze(self) -> None:
changed = list(self._op_lifetimes.values())
while changed:
orig_changed = changed
changed = []
for n in orig_changed:
# For OUT, find out the union of previous variables
# in the IN set for each succeeding node of n.
# out[n] = U s ∈ succ[n] in[s]
live_out = StableSet[str]()
for s in n.successors:
live_out |= s.live_in
# in[n] = use[n] U (out[n] - def [n])
live_in = n.used | (live_out - n.defined)
if live_out != n.live_out or live_in != n.live_in:
n.live_in = live_in
n.live_out = live_out
changed.extend(n.predecessors)
| algorandfoundation/puya | src/puya/mir/vla.py | Python | NOASSERTION | 4,310 |
import enum
from collections.abc import Mapping
from functools import cached_property
import attrs
from puya.algo_constants import MAINNET_AVM_VERSION
class LocalsCoalescingStrategy(enum.StrEnum):
root_operand = enum.auto()
root_operand_excluding_args = enum.auto()
aggressive = enum.auto()
@attrs.frozen(kw_only=True)
class PuyaOptions:
output_teal: bool = False
output_source_map: bool = False
output_arc32: bool = False
output_arc56: bool = False
output_ssa_ir: bool = False
output_optimization_ir: bool = False
output_destructured_ir: bool = False
output_memory_ir: bool = False
output_bytecode: bool = False
debug_level: int = 1
optimization_level: int = 1
target_avm_version: int = MAINNET_AVM_VERSION
cli_template_definitions: Mapping[str, int | bytes] = attrs.field(factory=dict)
template_vars_prefix: str = "TMPL_"
# TODO: the below is probably not scalable as a set of optimisation on/off flags,
# but it'll do for now
locals_coalescing_strategy: LocalsCoalescingStrategy = LocalsCoalescingStrategy.root_operand
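    # template_variables simply applies the prefix, e.g. with the default prefix a CLI
    # definition of {"SOME_VAR": 123} becomes {"TMPL_SOME_VAR": 123}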
@cached_property
def template_variables(self) -> Mapping[str, int | bytes]:
return {self.template_vars_prefix + k: v for k, v in self.cli_template_definitions.items()}
| algorandfoundation/puya | src/puya/options.py | Python | NOASSERTION | 1,299 |
import functools
import typing
from collections.abc import Iterable
from pathlib import Path
import attrs
from puya.utils import make_path_relative_to_cwd
@attrs.frozen(kw_only=True, repr=False, str=False)
class SourceLocation:
file: Path | None = attrs.field()
line: int = attrs.field(validator=attrs.validators.ge(1))
end_line: int = attrs.field()
comment_lines: int = attrs.field(default=0, validator=attrs.validators.ge(0))
"""the number of lines preceding `line` to take as a comment"""
column: int | None = attrs.field(
default=None, validator=attrs.validators.optional(attrs.validators.ge(0))
)
end_column: int | None = attrs.field(
default=None, validator=attrs.validators.optional(attrs.validators.ge(1))
)
@file.validator
def _file_validator(self, _attribute: object, value: Path) -> None:
# this check is simply to make sure relative paths aren't accidentally passed in.
# so we use root rather than is_absolute(), because that requires a drive on Windows,
# which we naturally don't supply for synthetic paths such as embedded lib.
if value is not None and not value.root:
raise ValueError(f"source file locations cannot be relative, got {value}")
@end_line.default
def _end_line_default(self) -> int:
return self.line
@end_line.validator
def _end_line_validator(self, _attribute: object, value: int) -> None:
if value < self.line:
raise ValueError(f"end_line = {value} is before start line = {self.line}")
@end_column.validator
def _end_column_validator(self, _attribute: object, value: int | None) -> None:
if (
self.end_line == self.line
and value is not None
and self.column is not None
and value <= self.column
):
raise ValueError(
f"source location end column = {value} is before start column = {self.column}"
)
@property
def line_count(self) -> int:
return self.end_line - self.line + 1
def __str__(self) -> str:
relative_path = make_path_relative_to_cwd(self.file) if self.file else "INTERNAL"
result = f"{relative_path}:{self.line}"
if self.end_line != self.line:
result += f"-{self.end_line}"
return result
def __repr__(self) -> str:
result = str(self)
if self.column is not None:
result += f":{self.column}"
if self.end_column is not None:
result += f"-{self.end_column}"
return result
def with_comments(self) -> "SourceLocation":
if self.comment_lines == 0:
return self
return attrs.evolve(
self,
line=self.line - self.comment_lines,
column=None,
comment_lines=0,
)
def try_merge(self, other: "SourceLocation | None") -> "SourceLocation":
"""Attempt to merge this source location with another, if they are either adjacent
or overlapping in lines. If not, the source location is returned unmodified."""
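        # e.g. merging lines 1-2 with lines 3-4 of the same file yields lines 1-4,
        # whereas merging lines 1-2 with lines 4-5 returns lines 1-2 unchanged (one-line gap)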
if other is None or other.file != self.file:
return self
file = self.file
# if they both start at the same line, not only is there overlap,
# but things are also much simpler
if self.line == other.line:
line = self.line
# expand to the largest end_line
end_line = max(self.end_line, other.end_line)
# in theory this should be the same value, but just in case, we can take the max
comment_lines = max(self.comment_lines, other.comment_lines)
# if either location is not column-bounded, then the result shouldn't be either
# otherwise take the minimum of the columns, since the line numbers are the same
if self.column is None or other.column is None:
column = None
else:
column = min(self.column, other.column)
else:
# if they don't start on the same line, one must start first
first, second = (self, other) if self.line < other.line else (other, self)
line_after_first = first.end_line + 1
# TODO: maybe consider fetching the source to exclude blank lines?
if line_after_first < second.line:
return self
# first starts first, so... that's where we start
line = first.line
# whilst we know first starts before second,
# it's also possible that first ends after second
end_line = max(second.end_line, first.end_line)
# naturally, comment line count needs to come from the first location
comment_lines = first.comment_lines
# same first starting column
column = first.column
# the logic for computing the end_column is the same regardless of whether
# they start on the same line or not
if self.end_line == other.end_line:
# if either location is not end_column-bounded, then the result shouldn't be either
# otherwise take the maximum of the end_columns, since the line numbers are the same
if self.end_column is None or other.end_column is None:
end_column = None
else:
end_column = max(self.end_column, other.end_column)
elif self.end_line > other.end_line:
            # if self ends last, take its end column
end_column = self.end_column
else:
            # otherwise other ends last, so take its end column
end_column = other.end_column
return SourceLocation(
file=file,
line=line,
end_line=end_line,
comment_lines=comment_lines,
column=column,
end_column=end_column,
)
@typing.overload
def sequential_source_locations_merge(sources: Iterable[SourceLocation]) -> SourceLocation: ...
@typing.overload
def sequential_source_locations_merge(
sources: Iterable[SourceLocation | None],
) -> SourceLocation | None: ...
def sequential_source_locations_merge(
sources: Iterable[SourceLocation | None],
) -> SourceLocation | None:
"""Given a sequence of SourceLocations, try merging them one at a one in order.
If all sources are None, then None is returned.
If there are no sources, then a TypeError will be raised.
"""
return functools.reduce(_try_merge_source_locations, sources)
def _try_merge_source_locations(
source: SourceLocation | None, merge: SourceLocation | None
) -> SourceLocation | None:
if source is None:
return merge
return source.try_merge(merge)
| algorandfoundation/puya | src/puya/parse.py | Python | NOASSERTION | 6,777 |
import abc
import enum
import typing
import attrs
class ContractReference(str): # can't use typing.NewType with pattern matching
__slots__ = ()
class LogicSigReference(str): # can't use typing.NewType with pattern matching
__slots__ = ()
class ProgramKind(enum.StrEnum):
approval = "approval"
clear_state = "clear"
logic_signature = "lsig"
class ProgramReference(abc.ABC):
@property
@abc.abstractmethod
def kind(self) -> ProgramKind: ...
@property
@abc.abstractmethod
def reference(self) -> ContractReference | LogicSigReference: ...
@property
@abc.abstractmethod
def id(self) -> str: ...
@attrs.frozen
class LogicSigProgramReference(ProgramReference):
kind: typing.Literal[ProgramKind.logic_signature] = attrs.field(
default=ProgramKind.logic_signature, init=False
)
reference: LogicSigReference
@property
def id(self) -> str:
return self.reference
@attrs.frozen
class ContractProgramReference(ProgramReference):
kind: typing.Literal[ProgramKind.approval, ProgramKind.clear_state]
reference: ContractReference
program_name: str
@property
def id(self) -> str:
return f"{self.reference}.{self.program_name}"
| algorandfoundation/puya | src/puya/program_refs.py | Python | NOASSERTION | 1,246 |
| algorandfoundation/puya | src/puya/teal/__init__.py | Python | NOASSERTION | 0 |
import typing
from collections.abc import Sequence
import attrs
from puya.errors import InternalError
from puya.teal import models
_T = typing.TypeVar("_T", bound=models.TealOp)
def preserve_stack_manipulations(
ops: list[_T],
window: slice,
new: Sequence[_T],
) -> None:
"""Replaces window of ops with new, preserving any stack manipulations from original window
new is not empty: added to last op in new
new is empty, window starts after first index: appended to prior op
new is empty, window ends before last index: prepended to subsequent op
"""
if not new:
# expand window to include at least 1 op
if window.start > 0:
# expand window to prior op
window = slice(window.start - 1, window.stop)
new = [ops[window.start]]
elif window.stop < len(ops): # must be start of a block
# expand window to subsequent op
new = [ops[window.stop]]
window = slice(window.start, window.stop + 1)
else:
# can this even happen? if it does, maybe attach to block instead?
raise InternalError("could not preserve stack manipulations")
# clear existing stack_manipulations on new sequence
new = [attrs.evolve(op, stack_manipulations=()) for op in new]
# add original stack manipulations to last op in new sequence
new[-1] = attrs.evolve(
new[-1],
stack_manipulations=[sm for op in ops[window] for sm in op.stack_manipulations],
)
# replace original ops with new ops
ops[window] = new
| algorandfoundation/puya | src/puya/teal/_util.py | Python | NOASSERTION | 1,583 |
import typing
from itertools import zip_longest
import attrs
from puya.avm import AVMType
from puya.errors import InternalError
from puya.ir.types_ import AVMBytesEncoding
from puya.mir import models as mir
from puya.mir.visitor import MIRVisitor
from puya.teal import models as teal
@attrs.frozen
class TealBuilder(MIRVisitor[None]):
next_block_label: str | None
use_frame: bool
label_stack: list[str]
ops: list[teal.TealOp] = attrs.field(factory=list)
@classmethod
def build_subroutine(cls, mir_sub: mir.MemorySubroutine) -> teal.TealSubroutine:
result = teal.TealSubroutine(
is_main=mir_sub.is_main,
signature=mir_sub.signature,
blocks=[],
source_location=mir_sub.source_location,
)
entry_block = mir_sub.body[0]
label_stack = [entry_block.block_name]
blocks_by_label = {
b.block_name: (b, None if next_b is None else next_b.block_name)
for b, next_b in zip_longest(mir_sub.body, mir_sub.body[1:])
}
while label_stack:
label = label_stack.pop()
mir_block, next_block_label = blocks_by_label.pop(label, (None, None))
if mir_block is None:
continue
builder = cls(
next_block_label=next_block_label,
use_frame=not mir_sub.is_main,
label_stack=label_stack,
)
if mir_block is entry_block and not mir_sub.is_main:
builder.ops.append(
teal.Proto(
parameters=len(mir_sub.signature.parameters),
returns=len(mir_sub.signature.returns),
source_location=mir_sub.source_location,
)
)
for op in mir_block.ops:
op.accept(builder)
teal_block = teal.TealBlock(
label=mir_block.block_name,
ops=builder.ops,
x_stack_in=mir_block.x_stack_in or (),
entry_stack_height=mir_block.entry_stack_height,
exit_stack_height=mir_block.exit_stack_height,
)
teal_block.validate_stack_height()
result.blocks.append(teal_block)
return result
def _add_op(self, op: teal.TealOp) -> None:
self.ops.append(op)
def visit_int(self, const: mir.Int) -> None:
self._add_op(
teal.Int(
const.value,
stack_manipulations=_lstack_manipulations(const),
source_location=const.source_location,
)
)
def visit_byte(self, const: mir.Byte) -> None:
self._add_op(
teal.Byte(
const.value,
const.encoding,
stack_manipulations=_lstack_manipulations(const),
source_location=const.source_location,
)
)
def visit_undefined(self, push: mir.Undefined) -> None:
match push.atype:
case AVMType.uint64:
self._add_op(
teal.Byte(
b"",
AVMBytesEncoding.utf8,
stack_manipulations=_lstack_manipulations(push),
source_location=push.source_location,
)
)
case AVMType.bytes:
self._add_op(
teal.Int(
0,
stack_manipulations=_lstack_manipulations(push),
source_location=push.source_location,
)
)
case unexpected:
typing.assert_never(unexpected)
def visit_template_var(self, const: mir.TemplateVar) -> None:
self._add_op(
teal.TemplateVar(
name=const.name,
op_code=const.op_code,
stack_manipulations=_lstack_manipulations(const),
source_location=const.source_location,
)
)
def visit_address(self, const: mir.Address) -> None:
self._add_op(
teal.Address(
const.value,
stack_manipulations=_lstack_manipulations(const),
source_location=const.source_location,
)
)
def visit_method(self, const: mir.Method) -> None:
self._add_op(
teal.Method(
const.value,
stack_manipulations=_lstack_manipulations(const),
source_location=const.source_location,
)
)
def visit_comment(self, _comment: mir.Comment) -> None:
pass
def visit_abstract_store(self, store: mir.AbstractStore) -> typing.Never:
raise InternalError(
"AbstractStore op encountered during TEAL generation", store.source_location
)
def visit_abstract_load(self, load: mir.AbstractLoad) -> typing.Never:
raise InternalError(
"AbstractLoad op encountered during TEAL generation", load.source_location
)
def _store_f_stack(self, store: mir.StoreFStack) -> None:
local_id = store.local_id
source_location = store.source_location
define = teal.StackDefine(local_id)
if self.use_frame:
op: teal.TealOp = teal.FrameBury(
store.frame_index,
stack_manipulations=[*_lstack_manipulations(store), define],
source_location=source_location,
)
else:
op = teal.Bury(
store.depth,
stack_manipulations=[*_lstack_manipulations(store), define],
source_location=source_location,
)
self._add_op(op)
def _insert_f_stack(self, store: mir.StoreFStack) -> None:
local_id = store.local_id
source_location = store.source_location
self._add_op(
teal.Cover(
store.depth,
stack_manipulations=[
*_lstack_manipulations(store),
teal.StackInsert(store.depth, local_id),
teal.StackDefine(local_id),
],
source_location=source_location,
)
)
def visit_store_f_stack(self, store: mir.StoreFStack) -> None:
if store.insert:
self._insert_f_stack(store)
else:
self._store_f_stack(store)
def visit_load_f_stack(self, load: mir.LoadFStack) -> None:
sm = _lstack_manipulations(load)
loc = load.source_location
if self.use_frame: # and load.depth:
op: teal.TealOp = teal.FrameDig(
n=load.frame_index, stack_manipulations=sm, source_location=loc
)
else:
op = teal.Dig(n=load.depth, stack_manipulations=sm, source_location=loc)
self._add_op(op)
def visit_store_x_stack(self, store: mir.StoreXStack) -> None:
self._add_op(
teal.Cover(
store.depth,
stack_manipulations=[
*_lstack_manipulations(store),
teal.StackInsert(store.depth, store.local_id),
teal.StackDefine(store.local_id),
],
source_location=store.source_location,
)
)
def visit_load_x_stack(self, load: mir.LoadXStack) -> None:
self._add_op(
teal.Uncover(
load.depth,
stack_manipulations=[
teal.StackPop(load.depth),
*_lstack_manipulations(load),
],
source_location=load.source_location,
)
)
def visit_store_l_stack(self, store: mir.StoreLStack) -> None:
if store.copy:
self._add_op(
teal.Dup(
stack_manipulations=[
# re-alias top of stack
teal.StackConsume(1),
*_lstack_manipulations(store),
# actual dup
teal.StackExtend([f"{store.local_id} (copy)"]),
],
source_location=store.source_location,
),
)
cover = store.depth
if store.copy:
cover += 1
self._add_op(
teal.Cover(
cover,
stack_manipulations=[
teal.StackConsume(1),
# store
teal.StackInsert(cover, store.local_id),
teal.StackDefine([store.local_id]),
],
source_location=store.source_location,
)
)
def visit_load_l_stack(self, load: mir.LoadLStack) -> None:
uncover = load.depth
assert uncover is not None, "expected l-stack depths to be assigned"
if load.copy:
self._add_op(
teal.Dig(
uncover,
stack_manipulations=_lstack_manipulations(load),
source_location=load.source_location,
)
)
else:
self._add_op(
teal.Uncover(
uncover,
stack_manipulations=[teal.StackPop(uncover), *_lstack_manipulations(load)],
source_location=load.source_location,
)
)
def visit_load_param(self, load: mir.LoadParam) -> None:
self._add_op(
teal.FrameDig(
load.index,
source_location=load.source_location,
stack_manipulations=_lstack_manipulations(load),
)
)
def visit_store_param(self, store: mir.StoreParam) -> None:
self._add_op(
teal.FrameBury(
store.index,
stack_manipulations=_lstack_manipulations(store),
source_location=store.source_location,
)
)
def visit_allocate(self, allocate: mir.Allocate) -> None:
bad_bytes_value = teal.Int(
0,
source_location=allocate.source_location,
)
bad_uint_value = teal.Byte(
value=b"",
encoding=AVMBytesEncoding.utf8,
source_location=allocate.source_location,
)
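        # note: each slot is filled with a value of the *other* AVM type (an int for bytes
        # variables, empty bytes for uint64 variables), presumably so that using a variable
        # before it is assigned fails loudly rather than silently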
for idx, local_id in enumerate(allocate.allocate_on_entry):
bad_value = bad_bytes_value if idx < allocate.num_bytes else bad_uint_value
self._add_op(
attrs.evolve(
bad_value,
stack_manipulations=[teal.StackExtend([local_id])],
)
)
def visit_pop(self, pop: mir.Pop) -> None:
self._add_op(
teal.PopN(
n=pop.n,
stack_manipulations=_lstack_manipulations(pop),
source_location=pop.source_location,
)
)
def visit_callsub(self, callsub: mir.CallSub) -> None:
self._add_op(
teal.CallSub(
target=callsub.target,
consumes=callsub.parameters,
produces=callsub.returns,
stack_manipulations=_lstack_manipulations(callsub),
source_location=callsub.source_location,
)
)
def visit_retsub(self, retsub: mir.RetSub) -> None:
fx_height = retsub.fx_height
if retsub.returns < fx_height:
# move returns to base of frame in order
for n in reversed(range(retsub.returns)):
self._add_op(teal.FrameBury(n, source_location=retsub.source_location))
else:
            # f-stack + x-stack is no larger than the number of returns, so move it out of the way
n = retsub.returns + fx_height - 1
for _ in range(fx_height):
self._add_op(teal.Uncover(n, source_location=retsub.source_location))
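        # in either branch the return values now occupy frame slots 0..returns-1 in order;
        # anything remaining above them (f-stack/x-stack leftovers) is discarded by retsub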
self._add_op(
teal.RetSub(
consumes=retsub.returns,
source_location=retsub.source_location,
)
)
def visit_program_exit(self, op: mir.ProgramExit) -> None:
self._add_op(
teal.Return(
error_message=op.error_message,
stack_manipulations=_lstack_manipulations(op),
source_location=op.source_location,
)
)
def visit_err(self, op: mir.Err) -> None:
self._add_op(
teal.Err(
error_message=op.error_message,
stack_manipulations=_lstack_manipulations(op),
source_location=op.source_location,
)
)
def visit_goto(self, op: mir.Goto) -> None:
self._add_op(
teal.Branch(
target=op.target,
error_message=op.error_message,
stack_manipulations=_lstack_manipulations(op),
source_location=op.source_location,
)
)
self.label_stack.append(op.target)
def visit_conditional_branch(self, op: mir.ConditionalBranch) -> None:
condition_op: type[teal.BranchNonZero | teal.BranchZero]
if op.nonzero_target == self.next_block_label:
condition_op = teal.BranchZero
condition_op_target = op.zero_target
other_target = op.nonzero_target
else:
condition_op = teal.BranchNonZero
condition_op_target = op.nonzero_target
other_target = op.zero_target
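        # emit the conditional jump to whichever target is not the next block, so that the
        # unconditional branch below targets the fall-through block where possible
        # (and can presumably be elided by a later optimisation pass)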
self._add_op(
condition_op(
target=condition_op_target,
error_message=op.error_message,
stack_manipulations=_lstack_manipulations(op),
source_location=op.source_location,
)
)
self.label_stack.append(condition_op_target)
self._add_op(
teal.Branch(
target=other_target,
error_message="",
stack_manipulations=[],
source_location=op.source_location,
)
)
self.label_stack.append(other_target)
def visit_switch(self, op: mir.Switch) -> None:
self._add_op(
teal.Switch(
targets=op.switch_targets,
error_message=op.error_message,
stack_manipulations=_lstack_manipulations(op),
source_location=op.source_location,
)
)
self.label_stack.extend(op.switch_targets)
self._add_op(
teal.Branch(
target=op.default_target,
error_message="",
stack_manipulations=[],
source_location=op.source_location,
)
)
self.label_stack.append(op.default_target)
def visit_match(self, op: mir.Match) -> None:
self._add_op(
teal.Match(
targets=op.match_targets,
error_message=op.error_message,
stack_manipulations=_lstack_manipulations(op),
source_location=op.source_location,
)
)
self.label_stack.extend(op.match_targets)
self._add_op(
teal.Branch(
target=op.default_target,
error_message="",
stack_manipulations=[],
source_location=op.source_location,
)
)
self.label_stack.append(op.default_target)
def visit_intrinsic(self, intrinsic: mir.IntrinsicOp) -> None:
self._add_op(
teal.Intrinsic(
op_code=intrinsic.op_code,
immediates=intrinsic.immediates,
error_message=intrinsic.error_message,
consumes=intrinsic.consumes,
produces=len(intrinsic.produces),
stack_manipulations=_lstack_manipulations(intrinsic),
source_location=intrinsic.source_location,
)
)
def _lstack_manipulations(op: mir.BaseOp) -> list[teal.StackManipulation]:
result = list[teal.StackManipulation]()
if op.consumes:
result.append(teal.StackConsume(op.consumes))
if op.produces:
result.append(teal.StackExtend(op.produces))
result.append(teal.StackDefine(op.produces))
return result
| algorandfoundation/puya | src/puya/teal/builder.py | Python | NOASSERTION | 16,431 |
from puya import log
from puya.context import CompileContext
from puya.mir import models as mir
from puya.teal import models as teal_models
from puya.teal.builder import TealBuilder
from puya.teal.optimize.main import optimize_teal_program
logger = log.get_logger(__name__)
def mir_to_teal(context: CompileContext, program_mir: mir.Program) -> teal_models.TealProgram:
main = TealBuilder.build_subroutine(program_mir.main)
subroutines = [TealBuilder.build_subroutine(mir_sub) for mir_sub in program_mir.subroutines]
teal = teal_models.TealProgram(
avm_version=program_mir.avm_version,
main=main,
subroutines=subroutines,
)
optimize_teal_program(context, teal)
return teal
| algorandfoundation/puya | src/puya/teal/main.py | Python | NOASSERTION | 723 |
import abc
import typing
from collections.abc import Iterable, Mapping, Sequence
import attrs
from puya.avm import OnCompletionAction, TransactionType
from puya.errors import InternalError
from puya.ir.types_ import AVMBytesEncoding
from puya.ir.utils import format_bytes, format_error_comment
from puya.mir import models as mir
from puya.parse import SourceLocation
from puya.utils import valid_bytes, valid_int64
MAX_NUMBER_CONSTANTS = 256
TEAL_ALIASES = {
**{e.name: e.value for e in OnCompletionAction},
**{e.name: e.value for e in TransactionType},
}
@attrs.frozen
class StackConsume:
n: int
@attrs.frozen
class StackExtend:
local_ids: Sequence[str]
@attrs.frozen
class StackInsert:
depth: int
local_id: str
@attrs.frozen
class StackPop:
depth: int
@attrs.frozen
class StackDefine:
local_ids: Sequence[str]
StackManipulation = StackConsume | StackExtend | StackDefine | StackInsert | StackPop
@attrs.frozen(kw_only=True)
class TealOp:
op_code: str
consumes: int
produces: int
source_location: SourceLocation | None = attrs.field(eq=False)
comment: str | None = None
"""A comment that is always emitted after the op in TEAL"""
error_message: str | None = None
"""Error message to display if program fails at this op"""
stack_manipulations: Sequence[StackManipulation] = attrs.field(
default=(),
converter=tuple[StackManipulation, ...],
eq=False,
)
@property
def immediates(self) -> Sequence[int | str]:
return ()
def teal(self) -> str:
teal_args = [self.op_code, *map(str, self.immediates)]
if self.comment or self.error_message:
error_message = (
format_error_comment(self.op_code, self.error_message)
if self.error_message
else ""
)
comment_lines = error_message.splitlines()
comment_lines += (self.comment or "").splitlines()
comment = "\n//".join(comment_lines)
teal_args.append(f"// {comment}")
return " ".join(teal_args)
@property
def stack_height_delta(self) -> int:
return self.produces - self.consumes
@attrs.frozen
class Dup(TealOp):
op_code: str = attrs.field(default="dup", init=False)
consumes: int = attrs.field(default=1, init=False)
produces: int = attrs.field(default=2, init=False)
@attrs.frozen
class Dup2(TealOp):
op_code: str = attrs.field(default="dup2", init=False)
consumes: int = attrs.field(default=2, init=False)
produces: int = attrs.field(default=4, init=False)
@attrs.frozen
class Pop(TealOp):
op_code: str = attrs.field(default="pop", init=False)
consumes: int = attrs.field(default=1, init=False)
produces: int = attrs.field(default=0, init=False)
@attrs.frozen
class TealOpN(TealOp):
n: int = attrs.field(validator=[attrs.validators.ge(0), attrs.validators.le(255)])
@property
def immediates(self) -> Sequence[int | str]:
return (self.n,)
@attrs.frozen
class TealOpUInt8(TealOpN):
n: int = attrs.field(validator=[attrs.validators.ge(0), attrs.validators.le(255)])
@attrs.frozen
class TealOpInt8(TealOpN):
n: int = attrs.field(validator=[attrs.validators.ge(-128), attrs.validators.le(127)])
@attrs.frozen
class Cover(TealOpUInt8):
op_code: str = attrs.field(default="cover", init=False)
consumes: int = attrs.field(init=False)
produces: int = attrs.field(init=False)
@consumes.default
def _consumes(self) -> int:
return self.n + 1
@produces.default
def _produces(self) -> int:
return self.n + 1
@attrs.frozen
class Uncover(TealOpUInt8):
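    # uncover n: moves the value that has n values above it up to the top of the stack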
op_code: str = attrs.field(default="uncover", init=False)
consumes: int = attrs.field(init=False)
produces: int = attrs.field(init=False)
@consumes.default
def _consumes(self) -> int:
return self.n + 1
@produces.default
def _produces(self) -> int:
return self.n + 1
@attrs.frozen
class Swap(TealOp):
op_code: str = attrs.field(default="swap", init=False)
consumes: int = 2
produces: int = 2
@attrs.frozen
class Dig(TealOpUInt8):
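    # dig n: pushes a copy of the value n deep onto the stack (dig 0 duplicates the top value)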
op_code: str = attrs.field(default="dig", init=False)
consumes: int = attrs.field(init=False)
produces: int = attrs.field(init=False)
@consumes.default
def _consumes(self) -> int:
return self.n + 1
@produces.default
def _produces(self) -> int:
return self.n + 2
@attrs.frozen
class Bury(TealOpUInt8):
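    # bury n: pops the top of the stack and writes it over the value n positions from the top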
op_code: str = attrs.field(default="bury", init=False)
consumes: int = attrs.field(init=False)
produces: int = attrs.field(init=False)
@consumes.default
def _consumes(self) -> int:
return self.n
@produces.default
def _produces(self) -> int:
return self.n - 1
def _valid_uint64(node: TealOp, _attribute: object, value: int) -> None:
if not valid_int64(value):
raise InternalError(
"Invalid UInt64 value",
node.source_location,
)
def _valid_bytes(node: TealOp, _attribute: object, value: bytes) -> None:
if not valid_bytes(value):
raise InternalError("Invalid Bytes value", node.source_location)
def _valid_ref(node: TealOp, _attribute: object, value: int) -> None:
if value < 0 or value >= MAX_NUMBER_CONSTANTS:
raise InternalError(
"Invalid constant reference",
node.source_location,
)
@attrs.frozen
class IntBlock(TealOp):
op_code: str = attrs.field(default="intcblock", init=False)
constants: Mapping[int | str, SourceLocation | None]
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return tuple(self.constants)
@attrs.frozen
class IntC(TealOp):
index: int = attrs.field(validator=_valid_ref)
op_code: str = attrs.field(init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@op_code.default
def _op_code(self) -> str:
if self.index < 4:
return f"intc_{self.index}"
else:
return "intc"
@property
def immediates(self) -> Sequence[int]:
if self.index < 4:
return ()
else:
return (self.index,)
@attrs.frozen
class PushInt(TealOp):
op_code: str = attrs.field(default="pushint", init=False)
value: int = attrs.field(validator=_valid_uint64)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@property
def immediates(self) -> Sequence[int]:
return (self.value,)
@attrs.frozen
class PushInts(TealOp):
op_code: str = attrs.field(default="pushints", init=False)
values: list[int] = attrs.field(validator=attrs.validators.deep_iterable(_valid_uint64))
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(init=False)
@produces.default
def _produces(self) -> int:
return len(self.values)
@property
def immediates(self) -> Sequence[int]:
return self.values
@attrs.frozen
class BytesBlock(TealOp):
op_code: str = attrs.field(default="bytecblock", init=False)
constants: Mapping[bytes | str, tuple[AVMBytesEncoding, SourceLocation | None]]
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def immediates(self) -> Sequence[str]:
return tuple(
_encoded_bytes(c, es[0]) if isinstance(c, bytes) else c
for c, es in self.constants.items()
)
@attrs.frozen
class BytesC(TealOp):
index: int = attrs.field(validator=_valid_ref)
op_code: str = attrs.field(init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@op_code.default
def _op_code(self) -> str:
if self.index < 4:
return f"bytec_{self.index}"
else:
return "bytec"
@property
def immediates(self) -> Sequence[int]:
if self.index < 4:
return ()
else:
return (self.index,)
@attrs.frozen
class PushBytes(TealOp):
op_code: str = attrs.field(default="pushbytes", init=False)
value: bytes = attrs.field(validator=_valid_bytes)
# exclude encoding from equality so for example 0x and "" can be combined
encoding: AVMBytesEncoding = attrs.field(eq=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@property
def immediates(self) -> Sequence[str]:
return (_encoded_bytes(self.value, self.encoding),)
@attrs.frozen
class PushBytess(TealOp):
op_code: str = attrs.field(default="pushbytess", init=False)
values: Sequence[tuple[bytes, AVMBytesEncoding]] = attrs.field()
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(init=False)
@produces.default
def _produces(self) -> int:
return len(self.values)
@values.validator
def _values_validator(
self, _: object, value: Sequence[tuple[bytes, AVMBytesEncoding]]
) -> None:
if not all(valid_bytes(b) for b, _ in value):
raise InternalError("invalid bytes value", self.source_location)
@property
def immediates(self) -> Sequence[str]:
return tuple(_encoded_bytes(c, e) for c, e in self.values)
@attrs.frozen
class FrameDig(TealOpInt8):
op_code: str = attrs.field(default="frame_dig", init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@attrs.frozen
class FrameBury(TealOpInt8):
op_code: str = attrs.field(default="frame_bury", init=False)
consumes: int = attrs.field(default=1, init=False)
produces: int = attrs.field(default=0, init=False)
@attrs.frozen
class Int(TealOp):
value: int | str
op_code: str = attrs.field(default="int", init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (self.value,)
@attrs.frozen
class PopN(TealOpUInt8):
op_code: str = attrs.field(default="popn", init=False)
consumes: int = attrs.field(init=False)
produces: int = attrs.field(default=0, init=False)
@consumes.default
def _consumes(self) -> int:
return self.n
@attrs.frozen
class DupN(TealOpUInt8):
op_code: str = attrs.field(default="dupn", init=False)
consumes: int = attrs.field(default=1, init=False)
produces: int = attrs.field(init=False)
@produces.default
def _produces(self) -> int:
return self.n + 1
@attrs.frozen
class Proto(TealOp):
parameters: int
returns: int
op_code: str = attrs.field(default="proto", init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return self.parameters, self.returns
@attrs.frozen
class Byte(TealOp):
value: bytes
# exclude encoding from equality so for example 0x and "" can be combined
encoding: AVMBytesEncoding = attrs.field(eq=False)
op_code: str = attrs.field(default="byte", init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (_encoded_bytes(self.value, self.encoding),)
@attrs.frozen
class TemplateVar(TealOp):
name: str
op_code: typing.Literal["int", "byte"]
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (self.name,)
@attrs.frozen
class Address(TealOp):
value: str
op_code: str = attrs.field(default="addr", init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (self.value,)
@attrs.frozen
class Method(TealOp):
value: str
op_code: str = attrs.field(default="method", init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=1, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (f'"{self.value}"',)
@attrs.frozen
class Intrinsic(TealOp):
immediates: Sequence[int | str]
@attrs.frozen
class ControlOp(TealOp, abc.ABC):
@property
@abc.abstractmethod
def targets(self) -> Sequence[str]: ...
@attrs.frozen
class RetSub(ControlOp):
op_code: str = attrs.field(default="retsub", init=False)
produces: int = attrs.field(default=0, init=False)
@property
def targets(self) -> Sequence[str]:
return ()
@attrs.frozen
class Return(ControlOp):
op_code: str = attrs.field(default="return", init=False)
consumes: int = attrs.field(default=1, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def targets(self) -> Sequence[str]:
return ()
@attrs.frozen
class Err(ControlOp):
op_code: str = attrs.field(default="err", init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def targets(self) -> Sequence[str]:
return ()
@attrs.frozen
class Branch(ControlOp):
target: str
op_code: str = attrs.field(default="b", init=False)
consumes: int = attrs.field(default=0, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (self.target,)
@property
def targets(self) -> Sequence[str]:
return (self.target,)
@attrs.frozen
class BranchNonZero(ControlOp):
target: str
op_code: str = attrs.field(default="bnz", init=False)
consumes: int = attrs.field(default=1, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (self.target,)
@property
def targets(self) -> Sequence[str]:
return (self.target,)
@attrs.frozen
class BranchZero(ControlOp):
target: str
op_code: str = attrs.field(default="bz", init=False)
consumes: int = attrs.field(default=1, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (self.target,)
@property
def targets(self) -> Sequence[str]:
return (self.target,)
@attrs.frozen
class Switch(ControlOp):
targets: Sequence[str] = attrs.field(converter=tuple[str, ...])
op_code: str = attrs.field(default="switch", init=False)
consumes: int = attrs.field(default=1, init=False)
produces: int = attrs.field(default=0, init=False)
@property
def immediates(self) -> Sequence[int | str]:
return self.targets
@attrs.frozen
class Match(ControlOp):
targets: Sequence[str] = attrs.field(converter=tuple[str, ...])
op_code: str = attrs.field(default="match", init=False)
consumes: int = attrs.field(init=False)
produces: int = attrs.field(default=0, init=False)
@consumes.default
def _consumes(self) -> int:
return len(self.targets) + 1
@property
def immediates(self) -> Sequence[int | str]:
return self.targets
@attrs.frozen
class CallSub(TealOp):
target: str
op_code: str = attrs.field(default="callsub", init=False)
@property
def immediates(self) -> Sequence[int | str]:
return (self.target,)
@attrs.frozen
class TealBlock:
label: str
ops: list[TealOp]
x_stack_in: Sequence[str]
entry_stack_height: int = attrs.field(validator=attrs.validators.ge(0))
exit_stack_height: int = attrs.field(validator=attrs.validators.ge(0))
def validate_stack_height(self) -> None:
stack_height = self.entry_stack_height
for op in self.ops:
stack_height -= op.consumes
if stack_height < 0:
raise InternalError("Access below stack height", op.source_location)
stack_height += op.produces
expected_exit_height = self.exit_stack_height
if stack_height != expected_exit_height and not (
self.ops and self.ops[-1].op_code in ("return", "retsub", "err")
):
raise InternalError(
f"Stack size at block {self.label} exit is {stack_height},"
f" expected {expected_exit_height}",
self.ops[-1].source_location,
)
@attrs.frozen
class TealSubroutine:
is_main: bool
signature: mir.Signature
blocks: list[TealBlock]
source_location: SourceLocation | None
@attrs.define
class TealProgram:
avm_version: int
main: TealSubroutine
subroutines: list[TealSubroutine]
@property
def all_subroutines(self) -> Iterable[TealSubroutine]:
yield self.main
yield from self.subroutines
def _encoded_bytes(value: bytes, encoding: AVMBytesEncoding) -> str:
# not all encodings can handle an empty bytes, so use base16 if bytes is empty
if not value and encoding in (AVMBytesEncoding.base32, AVMBytesEncoding.base64):
encoding = AVMBytesEncoding.base16
bytes_str = format_bytes(value, encoding)
if encoding in (
AVMBytesEncoding.utf8,
AVMBytesEncoding.base16,
AVMBytesEncoding.unknown,
):
return bytes_str
hint = encoding.name
return f"{hint}({bytes_str})"
|
algorandfoundation/puya
|
src/puya/teal/models.py
|
Python
|
NOASSERTION
| 17,843 |
algorandfoundation/puya
|
src/puya/teal/optimize/__init__.py
|
Python
|
NOASSERTION
| 0 |
|
LOAD_OP_CODES = frozenset(
[
"addr",
"arg",
*(f"arg_{i}" for i in range(4)),
"byte",
*(f"bytec_{i}" for i in range(4)), # valid as long as we don't push through a bytecblock
"gaid",
"gload",
# global OpcodeBudget could be affected by shuffling,
# but we don't guarantee ordering of stack manipulations
"global",
"gtxn",
"gtxna",
"int",
*(f"intc_{i}" for i in range(4)), # valid as long as we don't push through an intcblock
"load", # valid as long as we don't push through a store/stores
"method",
"pushbytes",
"pushint",
"txn",
"txna",
# below are valid as long as we don't push through an itxn_submit
"gitxn",
"gitxna",
"itxn",
"itxna",
]
)
LOAD_OP_CODES_INCL_OFFSET = frozenset(
[
*LOAD_OP_CODES,
"dig",
"frame_dig",
]
)
STORE_OPS_INCL_OFFSET = frozenset(
[
"app_global_del",
# "bury", TODO: this one is very hard, and doesn't show up in any of our examples
# "assert", TODO: enable this but only for O2 or higher
"frame_bury",
"itxn_field",
"pop",
"store",
]
)
COMMUTATIVE_OPS = frozenset(
[
"+",
"*",
"&",
"&&",
"|",
"||",
"^",
"==",
"!=",
"b*",
"b+",
"b&",
"b|",
"b^",
"b==",
"b!=",
"addw",
"mulw",
]
)
ORDERING_OPS = frozenset(["<", "<=", ">", ">=", "b<", "b<=", "b>", "b>="])
|
algorandfoundation/puya
|
src/puya/teal/optimize/_data.py
|
Python
|
NOASSERTION
| 1,645 |
import itertools
import typing
from collections.abc import Sequence
from puya.parse import sequential_source_locations_merge
from puya.teal import models
def combine_pushes(program: models.TealProgram) -> None:
for block in itertools.chain.from_iterable(sub.blocks for sub in program.all_subroutines):
pushes = list[models.PushInt | models.PushBytes]()
result = list[models.TealOp]()
for op in block.ops:
if _is_different_push_type(pushes, op):
result.append(_combine_ops(pushes))
pushes = []
if isinstance(op, models.PushInt | models.PushBytes):
pushes.append(op)
else:
result.append(op)
if pushes:
result.append(_combine_ops(pushes))
block.ops[:] = result
def _is_different_push_type(
consecutive: list[models.PushInt | models.PushBytes], next_op: models.TealOp
) -> bool:
return bool(consecutive) and type(consecutive[-1]) is not type(next_op)
def _combine_ops(consecutive: Sequence[models.PushInt | models.PushBytes]) -> models.TealOp:
if len(consecutive) == 1:
return consecutive[0]
loc = sequential_source_locations_merge(op.source_location for op in consecutive)
stack_manipulations = list(
itertools.chain.from_iterable(op.stack_manipulations for op in consecutive)
)
if isinstance(consecutive[0], models.PushInt):
consecutive = typing.cast(Sequence[models.PushInt], consecutive)
return models.PushInts(
values=[v.value for v in consecutive],
stack_manipulations=stack_manipulations,
source_location=loc,
comment=_comment_ops(consecutive),
)
else:
consecutive = typing.cast(Sequence[models.PushBytes], consecutive)
return models.PushBytess(
values=[(v.value, v.encoding) for v in consecutive],
stack_manipulations=stack_manipulations,
source_location=loc,
comment=_comment_ops(consecutive),
)
def _comment_ops(consecutive: Sequence[models.PushInt | models.PushBytes]) -> str:
return ", ".join(
map(str, ((v.comment or " ".join(map(str, v.immediates))) for v in consecutive))
)
|
algorandfoundation/puya
|
src/puya/teal/optimize/combine_pushes.py
|
Python
|
NOASSERTION
| 2,258 |
import itertools
import typing
from collections import Counter
from puya.errors import CodeError, InternalError
from puya.ir.types_ import AVMBytesEncoding
from puya.parse import SourceLocation
from puya.teal import models
from puya.utils import Address, coalesce, method_selector_hash, unique
_T = typing.TypeVar("_T")
def gather_program_constants(program: models.TealProgram) -> None:
# collect constants & template vars
all_ints = list[int | str]()
all_bytes = list[bytes | str]()
bytes_encodings = dict[bytes | str, AVMBytesEncoding]()
tmpl_locs = dict[bytes | int | str, SourceLocation | None]()
# collect constants
for block in itertools.chain.from_iterable(sub.blocks for sub in program.all_subroutines):
for idx, op in enumerate(block.ops):
# replace Method & Address constants with Byte before gathering
match op:
case models.Method(value=method_value):
op = block.ops[idx] = models.Byte(
value=method_selector_hash(method_value),
encoding=AVMBytesEncoding.base16,
comment=op.teal(),
stack_manipulations=op.stack_manipulations,
source_location=op.source_location,
)
case models.Address(value=address_value, source_location=loc):
address = Address.parse(address_value)
if not address.is_valid:
raise InternalError(f"Invalid address literal: {address_value}", loc)
op = block.ops[idx] = models.Byte(
value=address.public_key,
encoding=AVMBytesEncoding.base32,
comment=op.teal(),
stack_manipulations=op.stack_manipulations,
source_location=op.source_location,
)
match op:
case models.Int(value=int_or_alias):
all_ints.append(_resolve_teal_alias(int_or_alias))
case models.Byte(value=bytes_value) as byte:
all_bytes.append(bytes_value)
# preserve bytes encoding if it matches
if bytes_encodings.setdefault(bytes_value, byte.encoding) != byte.encoding:
bytes_encodings[bytes_value] = AVMBytesEncoding.base16
# put template vars in constant blocks regardless of optimization level
case models.TemplateVar(name=name, op_code=op_code):
# capture first defined tmpl location
tmpl_locs[name] = tmpl_locs.get(name) or op.source_location
match op_code:
case "int":
all_ints.append(name)
case "byte":
all_bytes.append(name)
bytes_encodings[name] = AVMBytesEncoding.base16
case _:
typing.assert_never(op_code)
all_templates = unique(val for val in (*all_ints, *all_bytes) if isinstance(val, str))
int_block = _sort_and_filter_constants(all_ints)
byte_block = _sort_and_filter_constants(all_bytes)
assert all(
t in int_block or t in byte_block for t in all_templates
), "expected all template variables to be in constant block"
# insert constant blocks
entry_block = program.main.blocks[0]
if byte_block:
entry_block.ops.insert(
0,
models.BytesBlock(
constants={b: (bytes_encodings[b], tmpl_locs.get(b)) for b in byte_block},
source_location=None,
),
)
if int_block:
entry_block.ops.insert(
0,
models.IntBlock(
constants={i: tmpl_locs.get(i) for i in int_block}, source_location=None
),
)
# replace constants and template vars with constant load ops
for block in itertools.chain.from_iterable(sub.blocks for sub in program.all_subroutines):
for idx, op in enumerate(block.ops):
match op:
case models.Int(value=int_value):
comment = coalesce(
op.comment,
int_value if isinstance(int_value, str) else None,
str(int_value),
)
int_value = _resolve_teal_alias(int_value)
try:
const_index = int_block[int_value]
except KeyError:
block.ops[idx] = models.PushInt(
value=int_value,
stack_manipulations=op.stack_manipulations,
source_location=op.source_location,
comment=comment,
)
else:
block.ops[idx] = models.IntC(
index=const_index,
stack_manipulations=op.stack_manipulations,
source_location=op.source_location,
comment=comment,
)
case models.Byte(value=bytes_value) as byte_op:
try:
const_index = byte_block[bytes_value]
except KeyError:
block.ops[idx] = models.PushBytes(
value=bytes_value,
encoding=byte_op.encoding,
stack_manipulations=op.stack_manipulations,
source_location=op.source_location,
comment=op.comment,
)
else:
block.ops[idx] = models.BytesC(
index=const_index,
stack_manipulations=op.stack_manipulations,
source_location=op.source_location,
comment=op.comment or " ".join(map(str, op.immediates)),
)
case models.TemplateVar(name=name, op_code=op_code):
match op_code:
case "int":
block.ops[idx] = models.IntC(
index=int_block[name],
stack_manipulations=op.stack_manipulations,
source_location=op.source_location,
comment=op.comment or name,
)
case "byte":
block.ops[idx] = models.BytesC(
index=byte_block[name],
stack_manipulations=op.stack_manipulations,
source_location=op.source_location,
comment=op.comment or name,
)
case _:
typing.assert_never(op_code)
def _sort_and_filter_constants(values: list[_T | str]) -> dict[_T | str, int]:
value_frequencies = {
value: freq
for value, freq in Counter(values).most_common()
if (freq > 1 or isinstance(value, str))
}
# filter constants based on their size * frequency when used as a constant vs inline
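    # e.g. a uint needing 2 bytes to encode and used 3 times costs 3 * (1 + 2) = 9 bytes inline,
    # versus 3 * 1 + 2 = 5 bytes via one of the first four constant slots, so it stays a constant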
freq_idx = 0
ordered_values = list(value_frequencies)
while freq_idx < len(ordered_values):
value = ordered_values[freq_idx]
if not isinstance(value, str):
encoded_size = _encoded_size(value)
const_usage_size = 1 if freq_idx < 4 else 2
inline_usage_size = 1 + encoded_size
freq = value_frequencies[value]
# include constant block size in consideration if only one value
block_size = 2 if len(ordered_values) == 1 else 0
inline = inline_usage_size * freq
const = const_usage_size * freq + encoded_size + block_size
if inline <= const:
ordered_values.pop(freq_idx)
continue
freq_idx += 1
# ensure any template variables that sit beyond MAX_NUMBER_CONSTANTS are always included
overflow_template_vars = [
value for value in ordered_values[models.MAX_NUMBER_CONSTANTS :] if isinstance(value, str)
]
if len(overflow_template_vars) > models.MAX_NUMBER_CONSTANTS:
        # this doesn't have to be an error, as the only consequence of not having all template
        # vars in the constant block is that it breaks the debug mapping assumptions;
        # however, for now it is easier to just fail, as this many template variables is unlikely
raise CodeError(f"cannot exceed {models.MAX_NUMBER_CONSTANTS} template values")
constants = (
*ordered_values[: models.MAX_NUMBER_CONSTANTS - len(overflow_template_vars)],
*overflow_template_vars,
)
assert len(constants) <= models.MAX_NUMBER_CONSTANTS, "constant block size exceeded"
return {value: index for index, value in enumerate(constants)}
def _encoded_size(value: object) -> int:
# varuint encodes 7 bits per byte
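    # e.g. 0 and 127 each encode to 1 byte, 128 encodes to 2 bytes, and 2**64 - 1 to 10 bytes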
if isinstance(value, int):
# max accounts for 0, which still requires at least 1 byte
return max(1, (value.bit_length() + 6) // 7)
elif isinstance(value, bytes):
return _encoded_size(len(value)) + len(value)
else:
raise TypeError(f"unencodable type: {value!r}")
def _resolve_teal_alias(value: int | str) -> int:
return models.TEAL_ALIASES[value] if isinstance(value, str) else value
|
algorandfoundation/puya
|
src/puya/teal/optimize/constant_block.py
|
Python
|
NOASSERTION
| 9,764 |
import attrs
from puya.parse import sequential_source_locations_merge
from puya.teal import models
from puya.teal._util import preserve_stack_manipulations
from puya.teal.optimize._data import LOAD_OP_CODES
def perform_constant_stack_shuffling(block: models.TealBlock) -> bool:
result = block.ops.copy()
loads = list[models.TealOp]()
loads_modified = modified = False
start_idx = idx = 0
while idx < len(result):
op = result[idx]
if _is_constant_load(op):
if not loads:
start_idx = idx
loads_modified = False
loads.append(op)
elif loads and op.op_code in ("dup", "dupn"):
modified = loads_modified = True
(n,) = op.immediates or (1,)
assert isinstance(n, int)
# extend loads with n copies of the last load
loads.extend([attrs.evolve(loads[-1], source_location=op.source_location)] * n)
elif loads:
match op:
case models.Uncover(n=n) if n < len(loads):
modified = loads_modified = True
uncovered = loads.pop(-(n + 1))
loads.append(uncovered)
case models.Cover(n=n) if n < len(loads):
modified = loads_modified = True
covered = loads.pop()
loads.insert(len(loads) - n, covered)
case _:
if loads_modified:
window = slice(start_idx, idx)
preserve_stack_manipulations(result, window, loads)
idx = start_idx + len(loads)
loads = []
idx += 1
if loads_modified and loads:
window = slice(start_idx, len(result))
preserve_stack_manipulations(result, window, loads)
block.ops[:] = result
return modified
def constant_dupn_insertion(block: models.TealBlock) -> bool:
result = list[models.TealOp]()
loads = list[models.TealOp]()
modified = False
for op in block.ops:
if loads and op == loads[0]:
loads.append(op)
else:
if loads:
modified = _collapse_loads(loads) or modified
result.extend(loads)
loads = []
if _is_constant_load(op):
loads.append(op)
else:
result.append(op)
if loads:
modified = _collapse_loads(loads) or modified
result.extend(loads)
block.ops[:] = result
return modified
def constant_dup2_insertion(block: models.TealBlock) -> bool:
result = block.ops
modified = False
idx = 0
while (idx + 4) <= len(block.ops):
load_a, load_b, load_a2, load_b2 = block.ops[idx : idx + 4]
if (
_is_constant_load(load_a)
and _is_constant_load(load_b)
and (load_a, load_b) == (load_a2, load_b2)
):
loc = sequential_source_locations_merge(
(load_a2.source_location, load_b2.source_location)
)
dup2 = models.Dup2(source_location=loc)
preserve_stack_manipulations(result, slice(idx + 2, idx + 4), [dup2])
modified = True
idx += 3
else:
idx += 1
block.ops[:] = result
return modified
def _collapse_loads(loads: list[models.TealOp]) -> bool:
n = len(loads) - 1
if n < 1:
return False
dup_source_location = sequential_source_locations_merge(op.source_location for op in loads[1:])
if n == 1:
dup_op: models.TealOp = models.Dup(source_location=dup_source_location)
else:
dup_op = models.DupN(n=n, source_location=dup_source_location)
preserve_stack_manipulations(loads, slice(1, None), [dup_op])
return True
def _is_constant_load(op: models.TealOp) -> bool:
return op.op_code in LOAD_OP_CODES or (isinstance(op, models.FrameDig) and op.n < 0)
|
algorandfoundation/puya
|
src/puya/teal/optimize/constant_stack_shuffling.py
|
Python
|
NOASSERTION
| 3,968 |
import itertools
from collections import defaultdict
import attrs
from puya import log
from puya.context import CompileContext
from puya.teal import models
from puya.teal._util import preserve_stack_manipulations
from puya.teal.optimize.combine_pushes import combine_pushes
from puya.teal.optimize.constant_block import gather_program_constants
from puya.teal.optimize.constant_stack_shuffling import (
constant_dup2_insertion,
constant_dupn_insertion,
perform_constant_stack_shuffling,
)
from puya.teal.optimize.peephole import peephole
from puya.teal.optimize.repeated_rotations import simplify_repeated_rotation_ops, simplify_swap_ops
from puya.teal.optimize.repeated_rotations_search import repeated_rotation_ops_search
logger = log.get_logger(__name__)
def optimize_teal_program(context: CompileContext, teal_program: models.TealProgram) -> None:
for teal_sub in teal_program.all_subroutines:
_optimize_subroutine(context, teal_sub)
before = [_get_all_stack_manipulations(sub) for sub in teal_program.subroutines]
gather_program_constants(teal_program)
if context.options.optimization_level > 0:
combine_pushes(teal_program)
after = [_get_all_stack_manipulations(sub) for sub in teal_program.subroutines]
assert before == after, "expected stack manipulations to be preserved after optimization"
def _get_all_stack_manipulations(sub: models.TealSubroutine) -> list[models.StackManipulation]:
return [sm for block in sub.blocks for op in block.ops for sm in op.stack_manipulations]
def _optimize_subroutine(context: CompileContext, teal_sub: models.TealSubroutine) -> None:
logger.debug(
f"optimizing TEAL subroutine {teal_sub.signature}", location=teal_sub.source_location
)
before = _get_all_stack_manipulations(teal_sub)
for teal_block in teal_sub.blocks:
_optimize_block(teal_block, level=context.options.optimization_level)
teal_block.validate_stack_height()
after = _get_all_stack_manipulations(teal_sub)
assert before == after, "expected stack manipulations to be preserved after optimization"
if context.options.optimization_level > 0:
# at this point, blocks should still be almost "basic"
# - control flow should only enter at the start of a block.
# - control flow should only leave at the end of the block (although this might be
# spread over multiple ops in the case of say a switch or match)
        # - the final op must be an unconditional control flow op (e.g. retsub, b, return, err)
_inline_jump_chains(teal_sub)
# now all blocks that are just a b should have been inlined/removed.
        # any single-op blocks left must be non-branching control ops, i.e. ops
        # that either exit the program or return from the subroutine.
        # inlining these is only possible when they are unconditionally branched to,
        # thus this still maintains the "almost basic" structure as outlined above.
_inline_single_op_blocks(teal_sub)
_inline_singly_referenced_blocks(teal_sub)
_remove_jump_fallthroughs(teal_sub)
def _optimize_block(block: models.TealBlock, *, level: int) -> None:
modified = True
while modified:
modified = False
if level > 0:
modified = perform_constant_stack_shuffling(block) or modified
modified = simplify_repeated_rotation_ops(block) or modified
modified = peephole(block, level) or modified
# we don't do dup/dupn collapse in the above loop, but after it.
# it's easier to deal with expanded dup/dupn instructions above when looking at
# stack shuffling etc, but once it's done we save ops / program size by collapsing them
constant_dupn_insertion(block)
constant_dup2_insertion(block)
if level >= 2:
# this is a brute-force search which can be slow at times,
# so it's only done once and only at higher optimisation levels
block.ops[:] = repeated_rotation_ops_search(block.ops)
# simplifying uncover/cover 1 to swap is easier to do after other rotation optimizations
simplify_swap_ops(block)
def _inline_jump_chains(teal_sub: models.TealSubroutine) -> None:
# build a map of any blocks that are just an unconditional branch to their targets
jumps = dict[str, str]()
for block_idx, block in enumerate(teal_sub.blocks.copy()):
match block.ops:
case [models.Branch(target=target_label) as b]:
if b.stack_manipulations:
logger.debug(
"not inlining jump-block due to stack manipulations",
location=b.source_location,
)
else:
jumps[block.label] = target_label
logger.debug(f"removing jump-chain block {block.label}")
teal_sub.blocks.pop(block_idx)
# now back-propagate any chains
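    # e.g. if block a just jumps to b, and b just jumps to c, branches targeting a or b go to c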
replacements = dict[str, str]()
for src, target in jumps.items():
while True:
try:
target = jumps[target]
except KeyError:
break
replacements[src] = target
logger.debug(f"branching to {src} will be replaced with {target}")
for block in teal_sub.blocks:
for op_idx, op in enumerate(block.ops):
if isinstance(op, models.Branch | models.BranchNonZero | models.BranchZero):
if op.target in replacements:
block.ops[op_idx] = attrs.evolve(op, target=replacements[op.target])
elif isinstance(op, models.Switch | models.Match):
block.ops[op_idx] = attrs.evolve(
op, targets=[replacements.get(t, t) for t in op.targets]
)
def _inline_single_op_blocks(teal_sub: models.TealSubroutine) -> None:
# TODO: this should only encounter exiting ops, so we don't need a traversal to find unused,
# just keep track of predecessors??
single_op_blocks = {b.label: b.ops for b in teal_sub.blocks if len(b.ops) == 1}
modified = False
for teal_block, next_block in itertools.zip_longest(teal_sub.blocks, teal_sub.blocks[1:]):
match teal_block.ops[-1]:
case models.Branch(target=target_label) as branch_op if (
(replace_ops := single_op_blocks.get(target_label))
and (next_block is None or target_label != next_block.label)
):
modified = True
(replace_op,) = replace_ops
# we shouldn't encounter any branching ops, since any block that
# is just an unconditional branch has already been inlined, and
# at this point blocks should still have an unconditional exit as the final op,
# which rules out bz/bnz/match/switch, leaving only exiting ops
# like retsub, return, or err.
# this also means we can keep track of which blocks to eliminate without having
# to do a traversal, thus the assertion
assert isinstance(replace_op, models.ControlOp) and not replace_op.targets
logger.debug(
f"replacing `{branch_op.teal()}` with `{replace_op.teal()}`",
location=branch_op.source_location,
)
preserve_stack_manipulations(teal_block.ops, slice(-1, None), replace_ops)
if modified:
# if any were inlined, they may no longer be referenced and thus removable
_remove_unreachable_blocks(teal_sub)
def _remove_unreachable_blocks(teal_sub: models.TealSubroutine) -> None:
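    # traverse branch targets starting from the entry block; any block never visited is unreachable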
entry = teal_sub.blocks[0]
to_visit = [entry]
unreachable_blocks_by_label = {b.label: b for b in teal_sub.blocks[1:]}
while to_visit:
current_block = to_visit.pop()
for op in current_block.ops:
if isinstance(op, models.ControlOp):
to_visit.extend(
target
for target_label in op.targets
if (target := unreachable_blocks_by_label.pop(target_label, None))
)
if unreachable_blocks_by_label:
logger.debug(f"removing unreachable blocks: {list(map(str, unreachable_blocks_by_label))}")
teal_sub.blocks[:] = [
b for b in teal_sub.blocks if b.label not in unreachable_blocks_by_label
]
def _inline_singly_referenced_blocks(teal_sub: models.TealSubroutine) -> None:
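    # a block whose only predecessor reaches it via an unconditional b at the end of that
    # predecessor can be appended directly onto it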
predecessors = defaultdict[str, list[str]](list)
predecessors[teal_sub.blocks[0].label].append("<entrypoint>")
for block in teal_sub.blocks:
for op in block.ops:
if isinstance(op, models.ControlOp):
for target_label in op.targets:
predecessors[target_label].append(block.label)
pairs = dict[str, str]()
inlineable = set[str]()
for block in teal_sub.blocks:
match block.ops[-1]:
case models.Branch(target=target_label):
target_predecessors = predecessors[target_label]
if len(target_predecessors) == 1:
assert target_predecessors == [block.label]
pairs[block.label] = target_label
inlineable.add(target_label)
blocks_by_label = {b.label: b for b in teal_sub.blocks}
result = list[models.TealBlock]()
for block in teal_sub.blocks:
this_label = block.label
if this_label not in inlineable:
result.append(block)
while (next_label := pairs.get(this_label)) is not None:
logger.debug(f"inlining single reference block {next_label} into {block.label}")
next_block = blocks_by_label[next_label]
preserve_stack_manipulations(block.ops, slice(-1, None), next_block.ops)
this_label = next_label
teal_sub.blocks[:] = result
def _remove_jump_fallthroughs(teal_sub: models.TealSubroutine) -> None:
for block, next_block in zip(teal_sub.blocks, teal_sub.blocks[1:], strict=False):
match block.ops[-1]:
            # we guard against having stack manipulations but only one op (and thus nowhere to
            # put them); this should only occur at O0, since at higher levels blocks containing
            # just a b will already have been inlined
case models.Branch(
target=target_label, stack_manipulations=sm
) if target_label == next_block.label and (len(block.ops) > 1 or not sm):
logger.debug(f"removing explicit jump to fall-through block {next_block.label}")
block.ops.pop()
if block.ops: # guard is only for O0 case
block.ops[-1] = attrs.evolve(
block.ops[-1],
stack_manipulations=(*block.ops[-1].stack_manipulations, *sm),
)
|
algorandfoundation/puya
|
src/puya/teal/optimize/main.py
|
Python
|
NOASSERTION
| 10,888 |
import attrs
from puya.teal import models
from puya.teal._util import preserve_stack_manipulations
from puya.teal.optimize._data import (
COMMUTATIVE_OPS,
LOAD_OP_CODES,
LOAD_OP_CODES_INCL_OFFSET,
ORDERING_OPS,
STORE_OPS_INCL_OFFSET,
)
from puya.utils import invert_ordered_binary_op
def peephole(block: models.TealBlock, opt_level: int) -> bool:
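    # slide a window over the block, attempting 4-op, 3-op, 2-op and finally single-op rewrites
    # at each position; after a successful rewrite the same position is examined again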
start_idx = 0
stack_height = block.entry_stack_height
any_modified = False
result = block.ops.copy()
while start_idx < len(result):
modified = False
window: slice | None = None
if opt_level > 0 and not modified and start_idx < len(result) - 3:
window = slice(start_idx, start_idx + 4)
new_values, modified = _optimize_quadruplet(*result[window])
if opt_level > 0 and not modified and start_idx < len(result) - 2:
window = slice(start_idx, start_idx + 3)
new_values, modified = _optimize_triplet(*result[window], stack_height=stack_height)
if not modified and start_idx < len(result) - 1:
window = slice(start_idx, start_idx + 2)
new_values, modified = _optimize_pair(*result[window])
if not modified:
window = slice(start_idx, start_idx + 1)
new_values, modified = optimize_single(*result[window])
if modified:
assert window is not None
any_modified = True
preserve_stack_manipulations(result, window, new_values)
else:
stack_height += result[start_idx].stack_height_delta
start_idx += 1 # go to next
block.ops[:] = result
return any_modified
def is_redundant_rotate(a: models.TealOp, b: models.TealOp) -> bool:
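    # e.g. `cover 2; uncover 2`, `uncover 3; cover 3` and `swap; swap` all leave the stack unchanged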
match a, b:
case models.Cover(n=a_n), models.Uncover(n=b_n) if a_n == b_n:
return True
case models.Uncover(n=a_n), models.Cover(n=b_n) if a_n == b_n:
return True
return is_stack_swap(a) and is_stack_swap(b)
def is_stack_swap(op: models.TealOp) -> bool:
return op.op_code == "swap" or (op.op_code in ("cover", "uncover") and op.immediates[0] == 1)
def optimize_single(a: models.TealOp) -> tuple[list[models.TealOp], bool]:
if a.op_code in ("cover", "uncover") and a.immediates == (0,):
return [], True
if a.op_code == "dig" and a.immediates == (0,):
return [
models.Dup(
source_location=a.source_location,
stack_manipulations=a.stack_manipulations,
)
], True
if a.op_code == "popn" and a.immediates == (1,):
return [
models.Pop(
source_location=a.source_location,
stack_manipulations=a.stack_manipulations,
)
], True
return [a], False
def _optimize_pair(a: models.TealOp, b: models.TealOp) -> tuple[list[models.TealOp], bool]:
if is_redundant_rotate(a, b):
return [], True
if is_stack_swap(a):
# `swap; pop` -> `bury 1`
if b.op_code == "pop":
return [models.Bury(n=1, source_location=b.source_location)], True
if b.op_code in COMMUTATIVE_OPS:
return [b], True
if b.op_code in ORDERING_OPS:
inverse_ordering_op = invert_ordered_binary_op(b.op_code)
return [attrs.evolve(b, op_code=inverse_ordering_op)], True
match a, b:
# `frame_dig n; frame_bury n` is redundant
case models.FrameDig(n=dig_n), models.FrameBury(n=bury_n) if dig_n == bury_n:
return [], True
# `frame_bury n; frame_dig n` can be simplified to dup; frame_bury n
case models.FrameBury(n=dig_n), models.FrameDig(n=bury_n) if dig_n == bury_n:
return [models.Dup(source_location=None), a], True
# `dup; swap` -> `dup`
case models.TealOp(op_code="dup" | "dupn"), maybe_swap if is_stack_swap(maybe_swap):
return [a], True
# combine consecutive dup/dupn's
case models.TealOp(op_code="dup" | "dupn"), models.TealOp(op_code="dup" | "dupn"):
(n1,) = a.immediates or (1,)
assert isinstance(n1, int)
(n2,) = b.immediates or (1,)
assert isinstance(n2, int)
return [models.DupN(n=n1 + n2, source_location=a.source_location)], True
# combine consecutive pop/popn's
case models.TealOp(op_code="pop" | "popn"), models.TealOp(op_code="pop" | "popn"):
(n1,) = a.immediates or (1,)
assert isinstance(n1, int)
(n2,) = b.immediates or (1,)
assert isinstance(n2, int)
return [models.PopN(n=n1 + n2, source_location=a.source_location)], True
# `dig 1; dig 1` -> `dup2`
case models.TealOpUInt8(op_code="dig", n=1), models.TealOpUInt8(op_code="dig", n=1):
return [models.Dup2(source_location=a.source_location or b.source_location)], True
return [a, b], False
def _optimize_triplet(
a: models.TealOp, b: models.TealOp, c: models.TealOp, *, stack_height: int
) -> tuple[list[models.TealOp], bool]:
if _frame_digs_overlap_with_ops(stack_height, a, b, c):
return [a, b, c], False
# `'cover 3; cover 3; swap` -> `uncover 2; uncover 3`
if (
is_stack_swap(c)
and (a.op_code == "cover" and a.immediates[0] == 3)
and (b.op_code == "cover" and b.immediates[0] == 3)
):
return [
models.Uncover(n=2, source_location=a.source_location),
models.Uncover(n=3, source_location=b.source_location),
], True
    # `swap; (consumes=0, produces=1); uncover 2` -> `(consumes=0, produces=1); swap`
if (
is_stack_swap(a)
and (c.op_code == "uncover" and c.immediates[0] == 2)
and (
b.op_code in LOAD_OP_CODES_INCL_OFFSET
# only count digs if they go below the swap
and (b.op_code != "dig" or int(b.immediates[0]) >= 2)
)
):
return [b, a], True
# <load A>; <load B>; swap -> <load B>; <load A>
if (
is_stack_swap(c)
and a.op_code in LOAD_OP_CODES_INCL_OFFSET
and b.op_code in LOAD_OP_CODES_INCL_OFFSET
        # can't swap dig 0, which will become a dup anyway
and (not isinstance(b, models.Dig) or b.n)
):
if isinstance(a, models.Dig):
a = attrs.evolve(a, n=a.n + 1)
if isinstance(b, models.Dig):
new_n = b.n - 1
if new_n == 0:
b = models.Dup(source_location=b.source_location)
else:
b = attrs.evolve(b, n=new_n)
return [b, a], True
# swap; <store A>; <store B> -> <store B>; <store A>
if (
is_stack_swap(a)
and b.op_code in STORE_OPS_INCL_OFFSET
and c.op_code in STORE_OPS_INCL_OFFSET
):
height_below_swap = stack_height - 2
if not (
(b.op_code == "frame_bury" and int(b.immediates[0]) >= height_below_swap)
or (c.op_code == "frame_bury" and int(c.immediates[0]) >= height_below_swap)
or (
# can't swap ops if store order is important
# e.g. itxn_field ApplicationArgs or frame_bury -1
b.op_code in ("frame_bury", "itxn_field")
and b.op_code == c.op_code
and b.immediates == c.immediates
)
):
return [c, b], True
match a, b, c:
# `uncover 2; swap; uncover 2` is equivalent to `swap`
case models.Uncover(n=2), maybe_swap, models.Uncover(n=2) if is_stack_swap(maybe_swap):
return [maybe_swap], True
# `dup; cover 2; swap` can be replaced by `dup; uncover 2`
case models.Dup(), models.Cover(n=2), maybe_swap if is_stack_swap(maybe_swap):
return [
a,
models.Uncover(n=2, source_location=b.source_location or c.source_location),
], True
# `uncover n; dup; cover n+1` can be replaced with `dig n`
# this occurs when the x-stack becomes the l-stack
if (
a.op_code == "uncover"
and b.op_code == "dup"
and c.op_code == "cover"
and isinstance((n := a.immediates[0]), int)
and (n + 1) == c.immediates[0]
):
return [
models.Dig(
n=n, source_location=a.source_location or b.source_location or c.source_location
)
], True
return [a, b, c], False
def _frame_digs_overlap_with_ops(stack_height: int, *ops: models.TealOp) -> bool:
"""
Check to see if there is a frame_dig in the sequence that could be impacted
if ops were re-ordered/eliminated/otherwise optimized.
"""
curr_height = min_stack_height = stack_height
for op in ops:
if op.op_code == "frame_dig":
n = op.immediates[0]
assert isinstance(n, int)
if n >= min_stack_height:
return True
curr_height -= op.consumes
min_stack_height = min(curr_height, min_stack_height)
curr_height += op.produces
return False
def _optimize_quadruplet(
a: models.TealOp, b: models.TealOp, c: models.TealOp, d: models.TealOp
) -> tuple[list[models.TealOp], bool]:
# `swap; <re-orderable load op>; swap; uncover 2` -> `dig|uncover n; cover 2`
if (
is_stack_swap(a)
and (
b.op_code in LOAD_OP_CODES
or (b.op_code in ("dig", "uncover") and int(b.immediates[0]) >= 2)
or (b.op_code == "frame_dig" and int(b.immediates[0]) < 0)
)
and is_stack_swap(c)
and (d.op_code == "uncover" and d.immediates[0] == 2)
):
return [b, models.Cover(n=2, source_location=d.source_location)], True
return [a, b, c, d], False
|
algorandfoundation/puya
|
src/puya/teal/optimize/peephole.py
|
Python
|
NOASSERTION
| 9,736 |
from collections.abc import Sequence
from puya.teal import models
from puya.teal._util import preserve_stack_manipulations
def _simplify_repeated_rotation_ops(
maybe_simplify: list[models.Cover | models.Uncover],
) -> tuple[Sequence[models.Cover | models.Uncover], bool]:
first = maybe_simplify[0]
is_cover = isinstance(first, models.Cover)
n = first.n
number_of_ops = len(maybe_simplify)
number_of_inverse = n + 1 - number_of_ops
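    # applying `cover n` (or `uncover n`) n + 1 times restores the original stack order,
    # e.g. `cover 2; cover 2` is equivalent to a single `uncover 2`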
while number_of_inverse < 0:
number_of_inverse += n + 1
if number_of_inverse >= number_of_ops:
return maybe_simplify, False
inverse_op: models.Cover | models.Uncover = (
models.Uncover(n=n, source_location=first.source_location)
if is_cover
else models.Cover(n=n, source_location=first.source_location)
)
simplified = [inverse_op] * number_of_inverse
return simplified, True
def simplify_repeated_rotation_ops(block: models.TealBlock) -> bool:
result = block.ops.copy()
maybe_simplify = list[models.Cover | models.Uncover]()
modified = False
end_idx = 0
while end_idx < len(result):
op = result[end_idx]
if isinstance(op, models.Cover | models.Uncover) and (
not maybe_simplify or op == maybe_simplify[0]
):
maybe_simplify.append(op)
else:
if maybe_simplify:
maybe_simplified, modified_ = _simplify_repeated_rotation_ops(maybe_simplify)
modified = modified or modified_
preserve_stack_manipulations(
result, slice(end_idx - len(maybe_simplify), end_idx), maybe_simplified
)
end_idx += len(maybe_simplified) - len(maybe_simplify)
maybe_simplify = []
if isinstance(op, models.Cover | models.Uncover):
maybe_simplify.append(op)
end_idx += 1
if maybe_simplify:
maybe_simplified, modified_ = _simplify_repeated_rotation_ops(maybe_simplify)
modified = modified or modified_
idx = len(result)
preserve_stack_manipulations(
result, slice(idx - len(maybe_simplify), idx), maybe_simplified
)
block.ops[:] = result
return modified
def simplify_swap_ops(block: models.TealBlock) -> bool:
result = list[models.TealOp]()
modified = False
for op in block.ops:
if isinstance(op, models.Cover | models.Uncover) and (op.n == 1):
modified = True
result.append(
models.Swap(
source_location=op.source_location, stack_manipulations=op.stack_manipulations
)
)
else:
result.append(op)
block.ops[:] = result
return modified
|
algorandfoundation/puya
|
src/puya/teal/optimize/repeated_rotations.py
|
Python
|
NOASSERTION
| 2,766 |
import functools
import itertools
import typing
from collections.abc import Sequence
import attrs
from puya import log
from puya.teal import models
from puya.teal._util import preserve_stack_manipulations
TealOpSequence = tuple[models.TealOpUInt8, ...]
logger = log.get_logger(__name__)
class InvalidOpSequenceError(Exception):
pass
@attrs.define
class TealStack:
stack: list[int]
@classmethod
def from_stack_size(cls, stack_size: int) -> "TealStack":
return cls(stack=list(range(stack_size)))
def apply(self, ops: Sequence[models.TealOpUInt8]) -> "TealStack":
stack = TealStack(self.stack.copy())
for op in ops:
n = op.n
if n:
index = len(stack.stack) - n - 1
if index < 0 or index >= len(stack.stack):
raise InvalidOpSequenceError
match op.op_code:
case "cover":
stack.stack.insert(index, stack.stack.pop())
case "uncover":
stack.stack.append(stack.stack.pop(index))
case _:
raise InvalidOpSequenceError
return stack
@functools.cache
def simplify_rotation_ops(original_ops: TealOpSequence) -> TealOpSequence | None:
num_rot_ops = len(original_ops)
max_rot_op_n = 0
for o in original_ops:
max_rot_op_n = max(max_rot_op_n, o.n)
original_stack = TealStack.from_stack_size(max_rot_op_n + 1)
expected = original_stack.apply(original_ops)
# entire sequence can be removed!
if expected == original_stack:
return ()
possible_rotation_ops = get_possible_rotation_ops(max_rot_op_n)
original_stack_result = original_stack.apply(original_ops)
# TODO: use a non-bruteforce approach and/or capture common simplifications as data
for num_rotation_ops in range(num_rot_ops):
for maybe_ops in itertools.permutations(possible_rotation_ops, num_rotation_ops):
try:
stack = original_stack.apply(maybe_ops)
except InvalidOpSequenceError:
continue
if expected == stack:
assert original_stack_result == original_stack.apply(maybe_ops)
return tuple(attrs.evolve(op, source_location=None) for op in maybe_ops)
return None
@functools.cache
def get_possible_rotation_ops(n: int) -> TealOpSequence:
possible_ops = list[models.TealOpUInt8]()
for i in range(1, n + 1):
possible_ops.append(models.Cover(i, source_location=None))
possible_ops.append(models.Uncover(i, source_location=None))
return tuple(possible_ops)
ROTATION_SIMPLIFY_OPS = frozenset(
[
"cover",
"uncover",
]
)
def repeated_rotation_ops_search(teal_ops: list[models.TealOp]) -> list[models.TealOp]:
maybe_remove_rotations = list[models.TealOpUInt8]()
result = list[models.TealOp]()
for teal_op in teal_ops:
if teal_op.op_code in ROTATION_SIMPLIFY_OPS:
maybe_remove_rotations.append(typing.cast(models.TealOpUInt8, teal_op))
else:
maybe_simplified = _maybe_simplified(maybe_remove_rotations)
maybe_remove_rotations = []
result.extend(maybe_simplified)
result.append(teal_op)
result.extend(_maybe_simplified(maybe_remove_rotations))
return result
def _maybe_simplified(
maybe_remove_rotations: list[models.TealOpUInt8], window_size: int = 5
) -> Sequence[models.TealOpUInt8]:
if len(maybe_remove_rotations) < 2:
return maybe_remove_rotations
for start_idx in range(len(maybe_remove_rotations) - 1):
window_slice = slice(start_idx, start_idx + window_size + 1)
window = maybe_remove_rotations[window_slice]
simplified = simplify_rotation_ops(tuple(window))
if simplified is not None:
logger.debug(
f"Replaced '{'; '.join(map(str, maybe_remove_rotations))}'"
f" with '{'; '.join(map(str, simplified))}',"
f" reducing by {len(maybe_remove_rotations) - len(simplified)} ops by search"
)
result_ = maybe_remove_rotations.copy()
preserve_stack_manipulations(result_, window_slice, simplified)
assert result_ != maybe_remove_rotations
return result_
return maybe_remove_rotations
|
algorandfoundation/puya
|
src/puya/teal/optimize/repeated_rotations_search.py
|
Python
|
NOASSERTION
| 4,405 |
import textwrap
import attrs
from puya.context import ArtifactCompileContext
from puya.teal import models
def emit_teal(context: ArtifactCompileContext, program: models.TealProgram) -> str:
indent = " " * 4
result = [
f"#pragma version {program.avm_version}",
"#pragma typetrack false",
"",
]
for idx, subroutine in enumerate(program.all_subroutines):
if idx > 0:
result.append("")
result.append(f"// {subroutine.signature}")
for block in subroutine.blocks:
last_location = None
result.append(f"{block.label}:")
for teal_op in block.ops:
if context.options.debug_level and (op_loc := teal_op.source_location):
whole_lines_location = attrs.evolve(
op_loc.with_comments(), column=None, end_column=None
)
if whole_lines_location != last_location:
last_location = whole_lines_location
src = context.try_get_source(whole_lines_location)
if src is not None:
result.append(f"{indent}// {whole_lines_location}")
lines = textwrap.dedent("\n".join(src)).splitlines()
result.extend(f"{indent}// {line.rstrip()}" for line in lines)
result.append(f"{indent}{teal_op.teal()}")
result.append("")
return "\n".join(result)
|
algorandfoundation/puya
|
src/puya/teal/output.py
|
Python
|
NOASSERTION
| 1,513 |
algorandfoundation/puya
|
src/puya/ussemble/__init__.py
|
Python
|
NOASSERTION
| 0 |
|
import struct
import typing
from collections import defaultdict
from collections.abc import Callable, Iterable, Mapping, Sequence
from puya import log
from puya.compilation_artifacts import DebugEvent
from puya.errors import InternalError
from puya.parse import SourceLocation
from puya.teal import models as teal
from puya.ussemble import models
from puya.ussemble.context import AssembleContext
from puya.ussemble.debug import build_debug_info
from puya.ussemble.op_spec import OP_SPECS
from puya.ussemble.op_spec_models import ImmediateEnum, ImmediateKind
logger = log.get_logger(__name__)
_BRANCHING_OPS = {
op.name
for op in OP_SPECS.values()
if any(i in (ImmediateKind.label, ImmediateKind.label_array) for i in op.immediates)
}
def assemble_bytecode_and_debug_info(
ctx: AssembleContext, program: teal.TealProgram
) -> models.AssembledProgram:
function_block_ids = {s.blocks[0].label: s.signature.name for s in program.all_subroutines}
version_bytes = _encode_varuint(program.avm_version)
pc_events = defaultdict[int, DebugEvent](lambda: DebugEvent())
pc_ops = dict[int, models.AVMOp]()
label_pcs = dict[str, int]()
# pc includes version header
pc = len(version_bytes)
    # first pass lowers teal ops, calculates pcs, and captures debug info
for subroutine in program.all_subroutines:
current_event = pc_events[pc]
current_event["subroutine"] = subroutine.signature.name
current_event["params"] = {
p.local_id: p.atype.name or "" for p in subroutine.signature.parameters
}
stack = list[str]()
for block in subroutine.blocks:
current_event = pc_events[pc]
# update stack with correct values on entry to a block
f_stack_height = block.entry_stack_height - len(block.x_stack_in)
stack[f_stack_height:] = block.x_stack_in
label_pcs[block.label] = pc
current_event["block"] = block.label
current_event["stack_in"] = stack.copy()
defined = set[str]()
for op in block.ops:
current_event = pc_events[pc]
avm_op = _lower_op(ctx, op)
# actual label offsets can't be determined until all PC values are known
# so just use a placeholder value initially
op_size = len(_encode_op(avm_op, get_label_offset=lambda _: 0))
assert op_size, "expected non empty bytecode"
_add_op_debug_events(
current_event,
function_block_ids,
op,
# note: stack & defined are mutated
stack,
defined,
)
pc_ops[pc] = avm_op
pc += op_size
# all pc values, including pc after final op
pcs = [*pc_ops, pc]
# second pass assembles final byte code
bytecode = [version_bytes]
for op_index, avm_op in enumerate(pc_ops.values()):
def get_label_offset(label: models.Label) -> int:
# label offset is the signed PC difference
# between the label PC location and the end of the current op
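            # e.g. branching to the label of the immediately following op gives an offset of 0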
return label_pcs[label.name] - pcs[op_index + 1] # noqa: B023
bytecode.append(_encode_op(avm_op, get_label_offset=get_label_offset))
return models.AssembledProgram(
bytecode=b"".join(bytecode),
debug_info=build_debug_info(ctx, pc_ops, pc_events),
template_variables={
var: ctx.provided_template_variables.get(var, (None, None))[0]
for var in ctx.template_variable_types
},
)
def _add_op_debug_events(
event: DebugEvent,
subroutine_ids: Mapping[str, str],
op: teal.TealOp,
stack: list[str],
defined: set[str],
) -> None:
stack_in = stack.copy()
num_defined = len(defined)
if op.op_code == "callsub":
(func_block,) = op.immediates
assert isinstance(func_block, str), "expected label"
event["callsub"] = subroutine_ids[func_block]
elif op.op_code == "retsub":
event["retsub"] = True
if op.error_message:
event["error"] = op.error_message
event["op"] = op.teal()
for sm in op.stack_manipulations:
match sm:
case teal.StackConsume(n=n):
for _ in range(n):
stack.pop()
case teal.StackExtend() as se:
stack.extend(se.local_ids)
case teal.StackDefine() as sd:
defined.update(sd.local_ids)
case teal.StackInsert() as si:
index = len(stack) - si.depth
stack.insert(index, si.local_id)
case teal.StackPop() as sp:
index = len(stack) - sp.depth - 1
stack.pop(index)
case _:
typing.assert_never(sm)
if len(defined) != num_defined:
event["defined_out"] = sorted(set(defined) & set(stack))
if stack_in != stack:
event["stack_out"] = stack.copy()
def _lower_op(ctx: AssembleContext, op: teal.TealOp) -> models.AVMOp:
loc = op.source_location
match op:
case teal.TemplateVar() | teal.Int() | teal.Byte():
raise InternalError(f"{op} should have been eliminated during TEAL phase", loc)
case teal.IntBlock(constants=constants):
return models.AVMOp(
op_code=op.op_code,
immediates=[_resolve_template_vars(ctx, int, constants.items())],
source_location=loc,
)
case teal.BytesBlock(constants=constants):
return models.AVMOp(
op_code=op.op_code,
immediates=[
_resolve_template_vars(ctx, bytes, [(b, es[1]) for b, es in constants.items()])
],
source_location=loc,
)
case teal.PushBytes(value=bytes_value):
return models.AVMOp(
op_code=op.op_code,
immediates=[bytes_value],
source_location=loc,
)
case teal.PushBytess(values=values):
return models.AVMOp(
op_code=op.op_code,
immediates=[[t[0] for t in values]],
source_location=loc,
)
case teal.PushInts(values=values):
return models.AVMOp(
op_code=op.op_code,
immediates=[values],
source_location=loc,
)
case teal.CallSub(target=label_id):
return models.AVMOp(
op_code=op.op_code,
immediates=[models.Label(name=label_id)],
source_location=loc,
)
case teal.TealOp(op_code="b" | "bz" | "bnz", immediates=[str(label_id)]):
return models.AVMOp(
op_code=op.op_code,
immediates=[models.Label(name=label_id)],
source_location=loc,
)
case teal.TealOp(
op_code="switch" | "match" as op_code, immediates=label_ids
) if _is_sequence(label_ids, str):
return models.AVMOp(
op_code=op_code,
immediates=[[models.Label(label_id) for label_id in label_ids]],
source_location=loc,
)
case teal.TealOp() if op.op_code not in _BRANCHING_OPS:
return models.AVMOp(op_code=op.op_code, immediates=op.immediates, source_location=loc)
case _:
raise InternalError(f"invalid teal op: {op}", loc)
def _resolve_template_vars[T: (int, bytes)](
ctx: AssembleContext, typ: type[T], values: Iterable[tuple[T | str, SourceLocation | None]]
) -> Sequence[T]:
result = []
for value_or_template, var_loc in values:
if not isinstance(value_or_template, str):
value = value_or_template
else:
try:
maybe_value, val_loc = ctx.provided_template_variables[value_or_template]
except KeyError:
# if bytecode isn't required for this program, then a dummy value is sufficient
bytecode_required = ctx.options.output_bytecode and (
ctx.artifact_ref in ctx.compilation_set
)
if ctx.is_reference or bytecode_required:
logger.error( # noqa: TRY400
f"template variable not defined: {value_or_template}", location=var_loc
)
value = typ()
else:
if isinstance(maybe_value, typ):
value = maybe_value
else:
logger.error(
f"invalid template value type for {value_or_template!r},"
f" expected {typ.__name__}",
location=val_loc or var_loc,
)
value = typ()
result.append(value)
return result
def _encode_op(op: models.AVMOp, *, get_label_offset: Callable[[models.Label], int]) -> bytes:
op_spec = op.op_spec
    bytecode = _encode_uint8(op_spec.code)
for immediate_kind, immediate in zip(op_spec.immediates, op.immediates, strict=True):
match immediate_kind:
case ImmediateKind.uint8 if isinstance(immediate, int):
bytecode += _encode_uint8(immediate)
case ImmediateKind.int8 if isinstance(immediate, int):
bytecode += _encode_int8(immediate)
case ImmediateEnum(codes=enum_codes) if isinstance(immediate, str):
immediate_code = enum_codes[immediate]
bytecode += _encode_uint8(immediate_code)
case ImmediateKind.bytes if isinstance(immediate, bytes):
bytecode += _encode_bytes(immediate)
case ImmediateKind.varuint if isinstance(immediate, int):
bytecode += _encode_varuint(immediate)
case ImmediateKind.varuint_array if _is_sequence(immediate, int):
bytecode += _encode_varuint_array(immediate)
case ImmediateKind.bytes_array if _is_sequence(immediate, bytes):
bytecode += _encode_bytes_array(immediate)
case ImmediateKind.label if isinstance(immediate, models.Label):
offset = get_label_offset(immediate)
bytecode += _encode_label(offset)
case ImmediateKind.label_array if _is_sequence(immediate, models.Label):
offsets = [get_label_offset(label) for label in immediate]
bytecode += _encode_label_array(offsets)
case _:
raise InternalError(f"Invalid op: {op}")
return bytecode
_encode_uint8 = struct.Struct(">B").pack
_encode_int8 = struct.Struct(">b").pack
_encode_label = struct.Struct(">h").pack
def _encode_varuint(value: int) -> bytes:
bits = value & 0x7F
value >>= 7
result = b""
while value:
result += _encode_uint8(0x80 | bits)
bits = value & 0x7F
value >>= 7
return result + _encode_uint8(bits)
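# Illustrative sketch (not part of the original source): _encode_varuint produces the
# little-endian base-128 (LEB128-style) encoding used for varuint immediates, where the high bit
# of each byte flags a continuation.
def _demo_encode_varuint() -> None:
    assert _encode_varuint(0) == b"\x00"
    assert _encode_varuint(127) == b"\x7f"
    assert _encode_varuint(128) == b"\x80\x01"
    assert _encode_varuint(300) == b"\xac\x02"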
def _encode_bytes(value: bytes) -> bytes:
return _encode_varuint(len(value)) + value
def _encode_varuint_array(values: Sequence[int]) -> bytes:
return b"".join((_encode_varuint(len(values)), *map(_encode_varuint, values)))
def _encode_label_array(values: Sequence[int]) -> bytes:
    # note: the op spec describes the label array size as a varuint,
    # however the actual algod Go implementation reads just a single byte,
    # so the maximum number of labels is 255
return b"".join((_encode_uint8(len(values)), *map(_encode_label, values)))
def _encode_bytes_array(values: Sequence[bytes]) -> bytes:
return b"".join(
(
_encode_varuint(len(values)),
*map(_encode_bytes, values),
),
)
def _is_sequence[_T](maybe: object, typ: type[_T]) -> typing.TypeGuard[Sequence[_T]]:
return isinstance(maybe, Sequence) and all(isinstance(m, typ) for m in maybe)
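# Illustrative sketch (not part of the original source): encoding a simple op with no labels.
# This assumes OP_SPECS maps "pushint" to opcode 129 with a single varuint immediate (as defined
# in op_spec.py); the get_label_offset callback is never invoked for this op.
def _demo_encode_pushint() -> None:
    op = models.AVMOp(source_location=None, op_code="pushint", immediates=[5])
    assert _encode_op(op, get_label_offset=lambda label: 0) == b"\x81\x05"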
|
algorandfoundation/puya
|
src/puya/ussemble/assemble.py
|
Python
|
NOASSERTION
| 12,046 |
import typing
from collections.abc import Mapping
from functools import cached_property
import attrs
from puya.avm import AVMType
from puya.compilation_artifacts import TemplateValue
from puya.context import CompileContext
from puya.program_refs import ContractReference, LogicSigReference
@attrs.frozen(kw_only=True)
class AssembleContext(CompileContext):
artifact_ref: ContractReference | LogicSigReference
is_reference: bool
template_variable_types: Mapping[str, typing.Literal[AVMType.uint64, AVMType.bytes]]
"""Template variables that are required and their types"""
template_constants: Mapping[str, TemplateValue] | None
"""Template variables provided via compilation"""
@cached_property
def provided_template_variables(self) -> Mapping[str, TemplateValue]:
return {
**{k: (v, None) for k, v in self.options.template_variables.items()},
**(self.template_constants or {}),
}
@cached_property
def offset_pc_from_constant_blocks(self) -> bool:
# only need to offset PC if there are any unspecified template variables
return not (self.provided_template_variables.keys() >= self.template_variable_types.keys())
|
algorandfoundation/puya
|
src/puya/ussemble/context.py
|
Python
|
NOASSERTION
| 1,214 |
from collections.abc import Mapping, Sequence
from puya.compilation_artifacts import DebugEvent, DebugInfo
from puya.parse import SourceLocation
from puya.ussemble import models
from puya.ussemble.context import AssembleContext
from puya.utils import normalize_path
def build_debug_info(
ctx: AssembleContext,
pc_ops: Mapping[int, models.AVMOp],
pc_events: Mapping[int, DebugEvent],
) -> DebugInfo:
op_pc_offset = pc_offset = 0
if ctx.offset_pc_from_constant_blocks:
for idx, (pc, node) in enumerate(pc_ops.items()):
# stop at first op that is not a constant block
if node.op_code not in ("intcblock", "bytecblock"):
# only need to set pc_offset if there was a constant block present (i.e. idx != 0)
# this is because the first op will be after the version byte and so will always
                # have a pc of 1, but an op_pc_offset of 0 means the values are not offset
if idx:
op_pc_offset = idx
pc_offset = pc
break
events = {pc - pc_offset: event for pc, event in pc_events.items() if pc >= pc_offset}
source_map = {
pc - pc_offset: node.source_location for pc, node in pc_ops.items() if pc >= pc_offset
}
files = sorted(map(normalize_path, {s.file for s in source_map.values() if s and s.file}))
mappings = _get_src_mappings(source_map, files)
return DebugInfo(
version=3,
sources=files,
mappings=";".join(mappings),
op_pc_offset=op_pc_offset,
pc_events=events,
)
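# Illustrative sketch (not part of the original source): a simplified view of the constant-block
# PC offsetting above, using bare op-code strings instead of models.AVMOp; the pc values are
# made up for this example.
def _demo_pc_offset() -> None:
    pc_op_codes = {1: "intcblock", 5: "bytecblock", 12: "pushint", 14: "retsub"}
    op_pc_offset = pc_offset = 0
    for idx, (pc, op_code) in enumerate(pc_op_codes.items()):
        if op_code not in ("intcblock", "bytecblock"):
            if idx:  # a constant block was present before this op
                op_pc_offset = idx
                pc_offset = pc
            break
    assert (op_pc_offset, pc_offset) == (2, 12)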
def _get_src_mappings(
source_map: Mapping[int, SourceLocation | None],
files: Sequence[str],
) -> list[str]:
mappings = []
previous_source_index = 0
previous_line = 0
previous_column = 0
for pc in range(max(source_map) + 1):
loc = source_map.get(pc)
if not loc or not loc.file:
mappings.append("")
continue
source_index = files.index(normalize_path(loc.file))
line = loc.line - 1 # make 0-indexed
column = loc.column or 0
mappings.append(
_base64vlq_encode(
0, # generated col offset, always 0 for AVM byte code
source_index - previous_source_index,
line - previous_line,
column - previous_column,
)
)
previous_source_index = source_index
previous_line = line
previous_column = column
return mappings
def _base64vlq_encode(*values: int) -> str:
"""Encode integers to a VLQ value"""
results = []
b64_chars = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
b64_encode = b64_chars.decode().__getitem__
shift = 5
flag = 1 << shift
mask = flag - 1
for v in values:
# add sign bit
v = (abs(v) << 1) | int(v < 0)
while True:
to_encode = v & mask
v >>= shift
results.append(b64_encode(to_encode | (v and flag)))
if not v:
break
return "".join(results)
|
algorandfoundation/puya
|
src/puya/ussemble/debug.py
|
Python
|
NOASSERTION
| 3,128 |
import typing
from collections.abc import Mapping
from puya.avm import AVMType
from puya.compilation_artifacts import TemplateValue
from puya.context import CompileContext
from puya.program_refs import ContractReference, LogicSigReference
from puya.teal import models as teal
from puya.ussemble import models
from puya.ussemble.assemble import assemble_bytecode_and_debug_info
from puya.ussemble.context import AssembleContext
from puya.utils import attrs_extend
def assemble_program(
ctx: CompileContext,
ref: ContractReference | LogicSigReference,
program: teal.TealProgram,
*,
template_constants: Mapping[str, TemplateValue] | None = None,
is_reference: bool = False,
) -> models.AssembledProgram:
referenced_template_vars = _gather_template_variables(program)
assemble_ctx = attrs_extend(
AssembleContext,
ctx,
artifact_ref=ref,
is_reference=is_reference,
template_variable_types=referenced_template_vars,
template_constants=template_constants,
)
return assemble_bytecode_and_debug_info(assemble_ctx, program)
def _gather_template_variables(
program: teal.TealProgram,
) -> Mapping[str, typing.Literal[AVMType.uint64, AVMType.bytes]]:
return {
t: AVMType.uint64 if isinstance(op, teal.IntBlock) else AVMType.bytes
for sub in program.all_subroutines
for block in sub.blocks
for op in block.ops
if isinstance(op, teal.IntBlock | teal.BytesBlock)
for t in op.constants
if isinstance(t, str)
}
|
algorandfoundation/puya
|
src/puya/ussemble/main.py
|
Python
|
NOASSERTION
| 1,555 |
from collections.abc import Mapping, Sequence
import attrs
from puya.compilation_artifacts import DebugInfo
from puya.parse import SourceLocation
from puya.ussemble.op_spec import OP_SPECS
from puya.ussemble.op_spec_models import OpSpec
@attrs.frozen(str=False)
class Label:
name: str
def __str__(self) -> str:
return f"{self.name}:"
@attrs.frozen(str=False, eq=False)
class AVMOp:
source_location: SourceLocation | None
op_code: str
immediates: Sequence[
Sequence[int] | int | Sequence[bytes] | bytes | Sequence[Label] | Label | str
]
@property
def op_spec(self) -> OpSpec:
return OP_SPECS[self.op_code]
@attrs.frozen
class AssembledProgram:
bytecode: bytes
debug_info: DebugInfo
template_variables: Mapping[str, int | bytes | None]
"""Indicates template variable values used in compilation"""
|
algorandfoundation/puya
|
src/puya/ussemble/models.py
|
Python
|
NOASSERTION
| 877 |
from puya.ussemble.op_spec_models import ImmediateEnum, ImmediateKind, OpSpec
OP_SPECS = {
"err": OpSpec(name="err", code=0, immediates=[]),
"sha256": OpSpec(name="sha256", code=1, immediates=[]),
"keccak256": OpSpec(name="keccak256", code=2, immediates=[]),
"sha512_256": OpSpec(name="sha512_256", code=3, immediates=[]),
"ed25519verify": OpSpec(name="ed25519verify", code=4, immediates=[]),
"ecdsa_verify": OpSpec(
name="ecdsa_verify",
code=5,
immediates=[ImmediateEnum(codes={"Secp256k1": 0, "Secp256r1": 1})],
),
"ecdsa_pk_decompress": OpSpec(
name="ecdsa_pk_decompress",
code=6,
immediates=[ImmediateEnum(codes={"Secp256k1": 0, "Secp256r1": 1})],
),
"ecdsa_pk_recover": OpSpec(
name="ecdsa_pk_recover",
code=7,
immediates=[ImmediateEnum(codes={"Secp256k1": 0, "Secp256r1": 1})],
),
"+": OpSpec(name="+", code=8, immediates=[]),
"-": OpSpec(name="-", code=9, immediates=[]),
"/": OpSpec(name="/", code=10, immediates=[]),
"*": OpSpec(name="*", code=11, immediates=[]),
"<": OpSpec(name="<", code=12, immediates=[]),
">": OpSpec(name=">", code=13, immediates=[]),
"<=": OpSpec(name="<=", code=14, immediates=[]),
">=": OpSpec(name=">=", code=15, immediates=[]),
"&&": OpSpec(name="&&", code=16, immediates=[]),
"||": OpSpec(name="||", code=17, immediates=[]),
"==": OpSpec(name="==", code=18, immediates=[]),
"!=": OpSpec(name="!=", code=19, immediates=[]),
"!": OpSpec(name="!", code=20, immediates=[]),
"len": OpSpec(name="len", code=21, immediates=[]),
"itob": OpSpec(name="itob", code=22, immediates=[]),
"btoi": OpSpec(name="btoi", code=23, immediates=[]),
"%": OpSpec(name="%", code=24, immediates=[]),
"|": OpSpec(name="|", code=25, immediates=[]),
"&": OpSpec(name="&", code=26, immediates=[]),
"^": OpSpec(name="^", code=27, immediates=[]),
"~": OpSpec(name="~", code=28, immediates=[]),
"mulw": OpSpec(name="mulw", code=29, immediates=[]),
"addw": OpSpec(name="addw", code=30, immediates=[]),
"divmodw": OpSpec(name="divmodw", code=31, immediates=[]),
"intcblock": OpSpec(name="intcblock", code=32, immediates=[ImmediateKind.varuint_array]),
"intc": OpSpec(name="intc", code=33, immediates=[ImmediateKind.uint8]),
"intc_0": OpSpec(name="intc_0", code=34, immediates=[]),
"intc_1": OpSpec(name="intc_1", code=35, immediates=[]),
"intc_2": OpSpec(name="intc_2", code=36, immediates=[]),
"intc_3": OpSpec(name="intc_3", code=37, immediates=[]),
"bytecblock": OpSpec(name="bytecblock", code=38, immediates=[ImmediateKind.bytes_array]),
"bytec": OpSpec(name="bytec", code=39, immediates=[ImmediateKind.uint8]),
"bytec_0": OpSpec(name="bytec_0", code=40, immediates=[]),
"bytec_1": OpSpec(name="bytec_1", code=41, immediates=[]),
"bytec_2": OpSpec(name="bytec_2", code=42, immediates=[]),
"bytec_3": OpSpec(name="bytec_3", code=43, immediates=[]),
"arg": OpSpec(name="arg", code=44, immediates=[ImmediateKind.uint8]),
"arg_0": OpSpec(name="arg_0", code=45, immediates=[]),
"arg_1": OpSpec(name="arg_1", code=46, immediates=[]),
"arg_2": OpSpec(name="arg_2", code=47, immediates=[]),
"arg_3": OpSpec(name="arg_3", code=48, immediates=[]),
"txn": OpSpec(
name="txn",
code=49,
immediates=[
ImmediateEnum(
codes={
"Sender": 0,
"Fee": 1,
"FirstValid": 2,
"FirstValidTime": 3,
"LastValid": 4,
"Note": 5,
"Lease": 6,
"Receiver": 7,
"Amount": 8,
"CloseRemainderTo": 9,
"VotePK": 10,
"SelectionPK": 11,
"VoteFirst": 12,
"VoteLast": 13,
"VoteKeyDilution": 14,
"Type": 15,
"TypeEnum": 16,
"XferAsset": 17,
"AssetAmount": 18,
"AssetSender": 19,
"AssetReceiver": 20,
"AssetCloseTo": 21,
"GroupIndex": 22,
"TxID": 23,
"ApplicationID": 24,
"OnCompletion": 25,
"ApplicationArgs": 26,
"NumAppArgs": 27,
"Accounts": 28,
"NumAccounts": 29,
"ApprovalProgram": 30,
"ClearStateProgram": 31,
"RekeyTo": 32,
"ConfigAsset": 33,
"ConfigAssetTotal": 34,
"ConfigAssetDecimals": 35,
"ConfigAssetDefaultFrozen": 36,
"ConfigAssetUnitName": 37,
"ConfigAssetName": 38,
"ConfigAssetURL": 39,
"ConfigAssetMetadataHash": 40,
"ConfigAssetManager": 41,
"ConfigAssetReserve": 42,
"ConfigAssetFreeze": 43,
"ConfigAssetClawback": 44,
"FreezeAsset": 45,
"FreezeAssetAccount": 46,
"FreezeAssetFrozen": 47,
"Assets": 48,
"NumAssets": 49,
"Applications": 50,
"NumApplications": 51,
"GlobalNumUint": 52,
"GlobalNumByteSlice": 53,
"LocalNumUint": 54,
"LocalNumByteSlice": 55,
"ExtraProgramPages": 56,
"Nonparticipation": 57,
"Logs": 58,
"NumLogs": 59,
"CreatedAssetID": 60,
"CreatedApplicationID": 61,
"LastLog": 62,
"StateProofPK": 63,
"ApprovalProgramPages": 64,
"NumApprovalProgramPages": 65,
"ClearStateProgramPages": 66,
"NumClearStateProgramPages": 67,
}
)
],
),
"global": OpSpec(
name="global",
code=50,
immediates=[
ImmediateEnum(
codes={
"MinTxnFee": 0,
"MinBalance": 1,
"MaxTxnLife": 2,
"ZeroAddress": 3,
"GroupSize": 4,
"LogicSigVersion": 5,
"Round": 6,
"LatestTimestamp": 7,
"CurrentApplicationID": 8,
"CreatorAddress": 9,
"CurrentApplicationAddress": 10,
"GroupID": 11,
"OpcodeBudget": 12,
"CallerApplicationID": 13,
"CallerApplicationAddress": 14,
"AssetCreateMinBalance": 15,
"AssetOptInMinBalance": 16,
"GenesisHash": 17,
"PayoutsEnabled": 18,
"PayoutsGoOnlineFee": 19,
"PayoutsPercent": 20,
"PayoutsMinBalance": 21,
"PayoutsMaxBalance": 22,
}
)
],
),
"gtxn": OpSpec(
name="gtxn",
code=51,
immediates=[
ImmediateKind.uint8,
ImmediateEnum(
codes={
"Sender": 0,
"Fee": 1,
"FirstValid": 2,
"FirstValidTime": 3,
"LastValid": 4,
"Note": 5,
"Lease": 6,
"Receiver": 7,
"Amount": 8,
"CloseRemainderTo": 9,
"VotePK": 10,
"SelectionPK": 11,
"VoteFirst": 12,
"VoteLast": 13,
"VoteKeyDilution": 14,
"Type": 15,
"TypeEnum": 16,
"XferAsset": 17,
"AssetAmount": 18,
"AssetSender": 19,
"AssetReceiver": 20,
"AssetCloseTo": 21,
"GroupIndex": 22,
"TxID": 23,
"ApplicationID": 24,
"OnCompletion": 25,
"ApplicationArgs": 26,
"NumAppArgs": 27,
"Accounts": 28,
"NumAccounts": 29,
"ApprovalProgram": 30,
"ClearStateProgram": 31,
"RekeyTo": 32,
"ConfigAsset": 33,
"ConfigAssetTotal": 34,
"ConfigAssetDecimals": 35,
"ConfigAssetDefaultFrozen": 36,
"ConfigAssetUnitName": 37,
"ConfigAssetName": 38,
"ConfigAssetURL": 39,
"ConfigAssetMetadataHash": 40,
"ConfigAssetManager": 41,
"ConfigAssetReserve": 42,
"ConfigAssetFreeze": 43,
"ConfigAssetClawback": 44,
"FreezeAsset": 45,
"FreezeAssetAccount": 46,
"FreezeAssetFrozen": 47,
"Assets": 48,
"NumAssets": 49,
"Applications": 50,
"NumApplications": 51,
"GlobalNumUint": 52,
"GlobalNumByteSlice": 53,
"LocalNumUint": 54,
"LocalNumByteSlice": 55,
"ExtraProgramPages": 56,
"Nonparticipation": 57,
"Logs": 58,
"NumLogs": 59,
"CreatedAssetID": 60,
"CreatedApplicationID": 61,
"LastLog": 62,
"StateProofPK": 63,
"ApprovalProgramPages": 64,
"NumApprovalProgramPages": 65,
"ClearStateProgramPages": 66,
"NumClearStateProgramPages": 67,
}
),
],
),
"load": OpSpec(name="load", code=52, immediates=[ImmediateKind.uint8]),
"store": OpSpec(name="store", code=53, immediates=[ImmediateKind.uint8]),
"txna": OpSpec(
name="txna",
code=54,
immediates=[
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
),
ImmediateKind.uint8,
],
),
"gtxna": OpSpec(
name="gtxna",
code=55,
immediates=[
ImmediateKind.uint8,
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
),
ImmediateKind.uint8,
],
),
"gtxns": OpSpec(
name="gtxns",
code=56,
immediates=[
ImmediateEnum(
codes={
"Sender": 0,
"Fee": 1,
"FirstValid": 2,
"FirstValidTime": 3,
"LastValid": 4,
"Note": 5,
"Lease": 6,
"Receiver": 7,
"Amount": 8,
"CloseRemainderTo": 9,
"VotePK": 10,
"SelectionPK": 11,
"VoteFirst": 12,
"VoteLast": 13,
"VoteKeyDilution": 14,
"Type": 15,
"TypeEnum": 16,
"XferAsset": 17,
"AssetAmount": 18,
"AssetSender": 19,
"AssetReceiver": 20,
"AssetCloseTo": 21,
"GroupIndex": 22,
"TxID": 23,
"ApplicationID": 24,
"OnCompletion": 25,
"ApplicationArgs": 26,
"NumAppArgs": 27,
"Accounts": 28,
"NumAccounts": 29,
"ApprovalProgram": 30,
"ClearStateProgram": 31,
"RekeyTo": 32,
"ConfigAsset": 33,
"ConfigAssetTotal": 34,
"ConfigAssetDecimals": 35,
"ConfigAssetDefaultFrozen": 36,
"ConfigAssetUnitName": 37,
"ConfigAssetName": 38,
"ConfigAssetURL": 39,
"ConfigAssetMetadataHash": 40,
"ConfigAssetManager": 41,
"ConfigAssetReserve": 42,
"ConfigAssetFreeze": 43,
"ConfigAssetClawback": 44,
"FreezeAsset": 45,
"FreezeAssetAccount": 46,
"FreezeAssetFrozen": 47,
"Assets": 48,
"NumAssets": 49,
"Applications": 50,
"NumApplications": 51,
"GlobalNumUint": 52,
"GlobalNumByteSlice": 53,
"LocalNumUint": 54,
"LocalNumByteSlice": 55,
"ExtraProgramPages": 56,
"Nonparticipation": 57,
"Logs": 58,
"NumLogs": 59,
"CreatedAssetID": 60,
"CreatedApplicationID": 61,
"LastLog": 62,
"StateProofPK": 63,
"ApprovalProgramPages": 64,
"NumApprovalProgramPages": 65,
"ClearStateProgramPages": 66,
"NumClearStateProgramPages": 67,
}
)
],
),
"gtxnsa": OpSpec(
name="gtxnsa",
code=57,
immediates=[
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
),
ImmediateKind.uint8,
],
),
"gload": OpSpec(name="gload", code=58, immediates=[ImmediateKind.uint8, ImmediateKind.uint8]),
"gloads": OpSpec(name="gloads", code=59, immediates=[ImmediateKind.uint8]),
"gaid": OpSpec(name="gaid", code=60, immediates=[ImmediateKind.uint8]),
"gaids": OpSpec(name="gaids", code=61, immediates=[]),
"loads": OpSpec(name="loads", code=62, immediates=[]),
"stores": OpSpec(name="stores", code=63, immediates=[]),
"bnz": OpSpec(name="bnz", code=64, immediates=[ImmediateKind.label]),
"bz": OpSpec(name="bz", code=65, immediates=[ImmediateKind.label]),
"b": OpSpec(name="b", code=66, immediates=[ImmediateKind.label]),
"return": OpSpec(name="return", code=67, immediates=[]),
"assert": OpSpec(name="assert", code=68, immediates=[]),
"bury": OpSpec(name="bury", code=69, immediates=[ImmediateKind.uint8]),
"popn": OpSpec(name="popn", code=70, immediates=[ImmediateKind.uint8]),
"dupn": OpSpec(name="dupn", code=71, immediates=[ImmediateKind.uint8]),
"pop": OpSpec(name="pop", code=72, immediates=[]),
"dup": OpSpec(name="dup", code=73, immediates=[]),
"dup2": OpSpec(name="dup2", code=74, immediates=[]),
"dig": OpSpec(name="dig", code=75, immediates=[ImmediateKind.uint8]),
"swap": OpSpec(name="swap", code=76, immediates=[]),
"select": OpSpec(name="select", code=77, immediates=[]),
"cover": OpSpec(name="cover", code=78, immediates=[ImmediateKind.uint8]),
"uncover": OpSpec(name="uncover", code=79, immediates=[ImmediateKind.uint8]),
"concat": OpSpec(name="concat", code=80, immediates=[]),
"substring": OpSpec(
name="substring", code=81, immediates=[ImmediateKind.uint8, ImmediateKind.uint8]
),
"substring3": OpSpec(name="substring3", code=82, immediates=[]),
"getbit": OpSpec(name="getbit", code=83, immediates=[]),
"setbit": OpSpec(name="setbit", code=84, immediates=[]),
"getbyte": OpSpec(name="getbyte", code=85, immediates=[]),
"setbyte": OpSpec(name="setbyte", code=86, immediates=[]),
"extract": OpSpec(
name="extract", code=87, immediates=[ImmediateKind.uint8, ImmediateKind.uint8]
),
"extract3": OpSpec(name="extract3", code=88, immediates=[]),
"extract_uint16": OpSpec(name="extract_uint16", code=89, immediates=[]),
"extract_uint32": OpSpec(name="extract_uint32", code=90, immediates=[]),
"extract_uint64": OpSpec(name="extract_uint64", code=91, immediates=[]),
"replace2": OpSpec(name="replace2", code=92, immediates=[ImmediateKind.uint8]),
"replace3": OpSpec(name="replace3", code=93, immediates=[]),
"base64_decode": OpSpec(
name="base64_decode",
code=94,
immediates=[ImmediateEnum(codes={"URLEncoding": 0, "StdEncoding": 1})],
),
"json_ref": OpSpec(
name="json_ref",
code=95,
immediates=[ImmediateEnum(codes={"JSONString": 0, "JSONUint64": 1, "JSONObject": 2})],
),
"balance": OpSpec(name="balance", code=96, immediates=[]),
"app_opted_in": OpSpec(name="app_opted_in", code=97, immediates=[]),
"app_local_get": OpSpec(name="app_local_get", code=98, immediates=[]),
"app_local_get_ex": OpSpec(name="app_local_get_ex", code=99, immediates=[]),
"app_global_get": OpSpec(name="app_global_get", code=100, immediates=[]),
"app_global_get_ex": OpSpec(name="app_global_get_ex", code=101, immediates=[]),
"app_local_put": OpSpec(name="app_local_put", code=102, immediates=[]),
"app_global_put": OpSpec(name="app_global_put", code=103, immediates=[]),
"app_local_del": OpSpec(name="app_local_del", code=104, immediates=[]),
"app_global_del": OpSpec(name="app_global_del", code=105, immediates=[]),
"asset_holding_get": OpSpec(
name="asset_holding_get",
code=112,
immediates=[ImmediateEnum(codes={"AssetBalance": 0, "AssetFrozen": 1})],
),
"asset_params_get": OpSpec(
name="asset_params_get",
code=113,
immediates=[
ImmediateEnum(
codes={
"AssetTotal": 0,
"AssetDecimals": 1,
"AssetDefaultFrozen": 2,
"AssetUnitName": 3,
"AssetName": 4,
"AssetURL": 5,
"AssetMetadataHash": 6,
"AssetManager": 7,
"AssetReserve": 8,
"AssetFreeze": 9,
"AssetClawback": 10,
"AssetCreator": 11,
}
)
],
),
"app_params_get": OpSpec(
name="app_params_get",
code=114,
immediates=[
ImmediateEnum(
codes={
"AppApprovalProgram": 0,
"AppClearStateProgram": 1,
"AppGlobalNumUint": 2,
"AppGlobalNumByteSlice": 3,
"AppLocalNumUint": 4,
"AppLocalNumByteSlice": 5,
"AppExtraProgramPages": 6,
"AppCreator": 7,
"AppAddress": 8,
}
)
],
),
"acct_params_get": OpSpec(
name="acct_params_get",
code=115,
immediates=[
ImmediateEnum(
codes={
"AcctBalance": 0,
"AcctMinBalance": 1,
"AcctAuthAddr": 2,
"AcctTotalNumUint": 3,
"AcctTotalNumByteSlice": 4,
"AcctTotalExtraAppPages": 5,
"AcctTotalAppsCreated": 6,
"AcctTotalAppsOptedIn": 7,
"AcctTotalAssetsCreated": 8,
"AcctTotalAssets": 9,
"AcctTotalBoxes": 10,
"AcctTotalBoxBytes": 11,
"AcctIncentiveEligible": 12,
"AcctLastProposed": 13,
"AcctLastHeartbeat": 14,
}
)
],
),
"voter_params_get": OpSpec(
name="voter_params_get",
code=116,
immediates=[ImmediateEnum(codes={"VoterBalance": 0, "VoterIncentiveEligible": 1})],
),
"online_stake": OpSpec(name="online_stake", code=117, immediates=[]),
"min_balance": OpSpec(name="min_balance", code=120, immediates=[]),
"pushbytes": OpSpec(name="pushbytes", code=128, immediates=[ImmediateKind.bytes]),
"pushint": OpSpec(name="pushint", code=129, immediates=[ImmediateKind.varuint]),
"pushbytess": OpSpec(name="pushbytess", code=130, immediates=[ImmediateKind.bytes_array]),
"pushints": OpSpec(name="pushints", code=131, immediates=[ImmediateKind.varuint_array]),
"ed25519verify_bare": OpSpec(name="ed25519verify_bare", code=132, immediates=[]),
"falcon_verify": OpSpec(name="falcon_verify", code=133, immediates=[]),
"sumhash512": OpSpec(name="sumhash512", code=134, immediates=[]),
"callsub": OpSpec(name="callsub", code=136, immediates=[ImmediateKind.label]),
"retsub": OpSpec(name="retsub", code=137, immediates=[]),
"proto": OpSpec(name="proto", code=138, immediates=[ImmediateKind.uint8, ImmediateKind.uint8]),
"frame_dig": OpSpec(name="frame_dig", code=139, immediates=[ImmediateKind.int8]),
"frame_bury": OpSpec(name="frame_bury", code=140, immediates=[ImmediateKind.int8]),
"switch": OpSpec(name="switch", code=141, immediates=[ImmediateKind.label_array]),
"match": OpSpec(name="match", code=142, immediates=[ImmediateKind.label_array]),
"shl": OpSpec(name="shl", code=144, immediates=[]),
"shr": OpSpec(name="shr", code=145, immediates=[]),
"sqrt": OpSpec(name="sqrt", code=146, immediates=[]),
"bitlen": OpSpec(name="bitlen", code=147, immediates=[]),
"exp": OpSpec(name="exp", code=148, immediates=[]),
"expw": OpSpec(name="expw", code=149, immediates=[]),
"bsqrt": OpSpec(name="bsqrt", code=150, immediates=[]),
"divw": OpSpec(name="divw", code=151, immediates=[]),
"sha3_256": OpSpec(name="sha3_256", code=152, immediates=[]),
"b+": OpSpec(name="b+", code=160, immediates=[]),
"b-": OpSpec(name="b-", code=161, immediates=[]),
"b/": OpSpec(name="b/", code=162, immediates=[]),
"b*": OpSpec(name="b*", code=163, immediates=[]),
"b<": OpSpec(name="b<", code=164, immediates=[]),
"b>": OpSpec(name="b>", code=165, immediates=[]),
"b<=": OpSpec(name="b<=", code=166, immediates=[]),
"b>=": OpSpec(name="b>=", code=167, immediates=[]),
"b==": OpSpec(name="b==", code=168, immediates=[]),
"b!=": OpSpec(name="b!=", code=169, immediates=[]),
"b%": OpSpec(name="b%", code=170, immediates=[]),
"b|": OpSpec(name="b|", code=171, immediates=[]),
"b&": OpSpec(name="b&", code=172, immediates=[]),
"b^": OpSpec(name="b^", code=173, immediates=[]),
"b~": OpSpec(name="b~", code=174, immediates=[]),
"bzero": OpSpec(name="bzero", code=175, immediates=[]),
"log": OpSpec(name="log", code=176, immediates=[]),
"itxn_begin": OpSpec(name="itxn_begin", code=177, immediates=[]),
"itxn_field": OpSpec(
name="itxn_field",
code=178,
immediates=[
ImmediateEnum(
codes={
"Sender": 0,
"Fee": 1,
"Note": 5,
"Receiver": 7,
"Amount": 8,
"CloseRemainderTo": 9,
"VotePK": 10,
"SelectionPK": 11,
"VoteFirst": 12,
"VoteLast": 13,
"VoteKeyDilution": 14,
"Type": 15,
"TypeEnum": 16,
"XferAsset": 17,
"AssetAmount": 18,
"AssetSender": 19,
"AssetReceiver": 20,
"AssetCloseTo": 21,
"ApplicationID": 24,
"OnCompletion": 25,
"ApplicationArgs": 26,
"Accounts": 28,
"ApprovalProgram": 30,
"ClearStateProgram": 31,
"RekeyTo": 32,
"ConfigAsset": 33,
"ConfigAssetTotal": 34,
"ConfigAssetDecimals": 35,
"ConfigAssetDefaultFrozen": 36,
"ConfigAssetUnitName": 37,
"ConfigAssetName": 38,
"ConfigAssetURL": 39,
"ConfigAssetMetadataHash": 40,
"ConfigAssetManager": 41,
"ConfigAssetReserve": 42,
"ConfigAssetFreeze": 43,
"ConfigAssetClawback": 44,
"FreezeAsset": 45,
"FreezeAssetAccount": 46,
"FreezeAssetFrozen": 47,
"Assets": 48,
"Applications": 50,
"GlobalNumUint": 52,
"GlobalNumByteSlice": 53,
"LocalNumUint": 54,
"LocalNumByteSlice": 55,
"ExtraProgramPages": 56,
"Nonparticipation": 57,
"StateProofPK": 63,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
)
],
),
"itxn_submit": OpSpec(name="itxn_submit", code=179, immediates=[]),
"itxn": OpSpec(
name="itxn",
code=180,
immediates=[
ImmediateEnum(
codes={
"Sender": 0,
"Fee": 1,
"FirstValid": 2,
"FirstValidTime": 3,
"LastValid": 4,
"Note": 5,
"Lease": 6,
"Receiver": 7,
"Amount": 8,
"CloseRemainderTo": 9,
"VotePK": 10,
"SelectionPK": 11,
"VoteFirst": 12,
"VoteLast": 13,
"VoteKeyDilution": 14,
"Type": 15,
"TypeEnum": 16,
"XferAsset": 17,
"AssetAmount": 18,
"AssetSender": 19,
"AssetReceiver": 20,
"AssetCloseTo": 21,
"GroupIndex": 22,
"TxID": 23,
"ApplicationID": 24,
"OnCompletion": 25,
"ApplicationArgs": 26,
"NumAppArgs": 27,
"Accounts": 28,
"NumAccounts": 29,
"ApprovalProgram": 30,
"ClearStateProgram": 31,
"RekeyTo": 32,
"ConfigAsset": 33,
"ConfigAssetTotal": 34,
"ConfigAssetDecimals": 35,
"ConfigAssetDefaultFrozen": 36,
"ConfigAssetUnitName": 37,
"ConfigAssetName": 38,
"ConfigAssetURL": 39,
"ConfigAssetMetadataHash": 40,
"ConfigAssetManager": 41,
"ConfigAssetReserve": 42,
"ConfigAssetFreeze": 43,
"ConfigAssetClawback": 44,
"FreezeAsset": 45,
"FreezeAssetAccount": 46,
"FreezeAssetFrozen": 47,
"Assets": 48,
"NumAssets": 49,
"Applications": 50,
"NumApplications": 51,
"GlobalNumUint": 52,
"GlobalNumByteSlice": 53,
"LocalNumUint": 54,
"LocalNumByteSlice": 55,
"ExtraProgramPages": 56,
"Nonparticipation": 57,
"Logs": 58,
"NumLogs": 59,
"CreatedAssetID": 60,
"CreatedApplicationID": 61,
"LastLog": 62,
"StateProofPK": 63,
"ApprovalProgramPages": 64,
"NumApprovalProgramPages": 65,
"ClearStateProgramPages": 66,
"NumClearStateProgramPages": 67,
}
)
],
),
"itxna": OpSpec(
name="itxna",
code=181,
immediates=[
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
),
ImmediateKind.uint8,
],
),
"itxn_next": OpSpec(name="itxn_next", code=182, immediates=[]),
"gitxn": OpSpec(
name="gitxn",
code=183,
immediates=[
ImmediateKind.uint8,
ImmediateEnum(
codes={
"Sender": 0,
"Fee": 1,
"FirstValid": 2,
"FirstValidTime": 3,
"LastValid": 4,
"Note": 5,
"Lease": 6,
"Receiver": 7,
"Amount": 8,
"CloseRemainderTo": 9,
"VotePK": 10,
"SelectionPK": 11,
"VoteFirst": 12,
"VoteLast": 13,
"VoteKeyDilution": 14,
"Type": 15,
"TypeEnum": 16,
"XferAsset": 17,
"AssetAmount": 18,
"AssetSender": 19,
"AssetReceiver": 20,
"AssetCloseTo": 21,
"GroupIndex": 22,
"TxID": 23,
"ApplicationID": 24,
"OnCompletion": 25,
"ApplicationArgs": 26,
"NumAppArgs": 27,
"Accounts": 28,
"NumAccounts": 29,
"ApprovalProgram": 30,
"ClearStateProgram": 31,
"RekeyTo": 32,
"ConfigAsset": 33,
"ConfigAssetTotal": 34,
"ConfigAssetDecimals": 35,
"ConfigAssetDefaultFrozen": 36,
"ConfigAssetUnitName": 37,
"ConfigAssetName": 38,
"ConfigAssetURL": 39,
"ConfigAssetMetadataHash": 40,
"ConfigAssetManager": 41,
"ConfigAssetReserve": 42,
"ConfigAssetFreeze": 43,
"ConfigAssetClawback": 44,
"FreezeAsset": 45,
"FreezeAssetAccount": 46,
"FreezeAssetFrozen": 47,
"Assets": 48,
"NumAssets": 49,
"Applications": 50,
"NumApplications": 51,
"GlobalNumUint": 52,
"GlobalNumByteSlice": 53,
"LocalNumUint": 54,
"LocalNumByteSlice": 55,
"ExtraProgramPages": 56,
"Nonparticipation": 57,
"Logs": 58,
"NumLogs": 59,
"CreatedAssetID": 60,
"CreatedApplicationID": 61,
"LastLog": 62,
"StateProofPK": 63,
"ApprovalProgramPages": 64,
"NumApprovalProgramPages": 65,
"ClearStateProgramPages": 66,
"NumClearStateProgramPages": 67,
}
),
],
),
"gitxna": OpSpec(
name="gitxna",
code=184,
immediates=[
ImmediateKind.uint8,
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
),
ImmediateKind.uint8,
],
),
"box_create": OpSpec(name="box_create", code=185, immediates=[]),
"box_extract": OpSpec(name="box_extract", code=186, immediates=[]),
"box_replace": OpSpec(name="box_replace", code=187, immediates=[]),
"box_del": OpSpec(name="box_del", code=188, immediates=[]),
"box_len": OpSpec(name="box_len", code=189, immediates=[]),
"box_get": OpSpec(name="box_get", code=190, immediates=[]),
"box_put": OpSpec(name="box_put", code=191, immediates=[]),
"txnas": OpSpec(
name="txnas",
code=192,
immediates=[
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
)
],
),
"gtxnas": OpSpec(
name="gtxnas",
code=193,
immediates=[
ImmediateKind.uint8,
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
),
],
),
"gtxnsas": OpSpec(
name="gtxnsas",
code=194,
immediates=[
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
)
],
),
"args": OpSpec(name="args", code=195, immediates=[]),
"gloadss": OpSpec(name="gloadss", code=196, immediates=[]),
"itxnas": OpSpec(
name="itxnas",
code=197,
immediates=[
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
)
],
),
"gitxnas": OpSpec(
name="gitxnas",
code=198,
immediates=[
ImmediateKind.uint8,
ImmediateEnum(
codes={
"ApplicationArgs": 26,
"Accounts": 28,
"Assets": 48,
"Applications": 50,
"Logs": 58,
"ApprovalProgramPages": 64,
"ClearStateProgramPages": 66,
}
),
],
),
"vrf_verify": OpSpec(
name="vrf_verify", code=208, immediates=[ImmediateEnum(codes={"VrfAlgorand": 0})]
),
"block": OpSpec(
name="block",
code=209,
immediates=[
ImmediateEnum(
codes={
"BlkSeed": 0,
"BlkTimestamp": 1,
"BlkProposer": 2,
"BlkFeesCollected": 3,
"BlkBonus": 4,
"BlkBranch": 5,
"BlkFeeSink": 6,
"BlkProtocol": 7,
"BlkTxnCounter": 8,
"BlkProposerPayout": 9,
}
)
],
),
"box_splice": OpSpec(name="box_splice", code=210, immediates=[]),
"box_resize": OpSpec(name="box_resize", code=211, immediates=[]),
"ec_add": OpSpec(
name="ec_add",
code=224,
immediates=[
ImmediateEnum(codes={"BN254g1": 0, "BN254g2": 1, "BLS12_381g1": 2, "BLS12_381g2": 3})
],
),
"ec_scalar_mul": OpSpec(
name="ec_scalar_mul",
code=225,
immediates=[
ImmediateEnum(codes={"BN254g1": 0, "BN254g2": 1, "BLS12_381g1": 2, "BLS12_381g2": 3})
],
),
"ec_pairing_check": OpSpec(
name="ec_pairing_check",
code=226,
immediates=[
ImmediateEnum(codes={"BN254g1": 0, "BN254g2": 1, "BLS12_381g1": 2, "BLS12_381g2": 3})
],
),
"ec_multi_scalar_mul": OpSpec(
name="ec_multi_scalar_mul",
code=227,
immediates=[
ImmediateEnum(codes={"BN254g1": 0, "BN254g2": 1, "BLS12_381g1": 2, "BLS12_381g2": 3})
],
),
"ec_subgroup_check": OpSpec(
name="ec_subgroup_check",
code=228,
immediates=[
ImmediateEnum(codes={"BN254g1": 0, "BN254g2": 1, "BLS12_381g1": 2, "BLS12_381g2": 3})
],
),
"ec_map_to": OpSpec(
name="ec_map_to",
code=229,
immediates=[
ImmediateEnum(codes={"BN254g1": 0, "BN254g2": 1, "BLS12_381g1": 2, "BLS12_381g2": 3})
],
),
"mimc": OpSpec(
name="mimc",
code=230,
immediates=[ImmediateEnum(codes={"BN254Mp110": 0, "BLS12_381Mp111": 1})],
),
}
|
algorandfoundation/puya
|
src/puya/ussemble/op_spec.py
|
Python
|
NOASSERTION
| 38,713 |
import enum
from collections.abc import Mapping
import attrs
class ImmediateKind(enum.StrEnum):
uint8 = enum.auto()
int8 = enum.auto()
label = enum.auto()
varuint = enum.auto()
bytes = enum.auto()
# array types
label_array = enum.auto()
varuint_array = enum.auto()
bytes_array = enum.auto()
def __repr__(self) -> str:
return f"{type(self).__name__}.{self.name}"
@attrs.frozen
class ImmediateEnum:
codes: Mapping[str, int]
@attrs.frozen
class OpSpec:
name: str
code: int
immediates: list[ImmediateKind | ImmediateEnum]
|
algorandfoundation/puya
|
src/puya/ussemble/op_spec_models.py
|
Python
|
NOASSERTION
| 591 |
import base64
import contextlib
import functools
import math
import os
import typing
from collections.abc import Callable, Iterable, Iterator, MutableMapping, MutableSet, Sequence, Set
from pathlib import Path
import attrs
from puya.algo_constants import (
ADDRESS_CHECKSUM_LENGTH,
ENCODED_ADDRESS_LENGTH,
MAX_APP_PAGE_SIZE,
MAX_BYTES_LENGTH,
MAX_UINT64,
PUBLIC_KEY_HASH_LENGTH,
)
@attrs.frozen
class Address:
address: str
public_key: bytes = b""
check_sum: bytes = b""
is_valid: bool = False
@classmethod
def from_public_key(cls, public_key: bytes) -> typing.Self:
check_sum = sha512_256_hash(public_key)[-ADDRESS_CHECKSUM_LENGTH:]
address_bytes = public_key + check_sum
address = base64.b32encode(address_bytes).decode("utf8").rstrip("=")
assert len(address) == ENCODED_ADDRESS_LENGTH
return cls(
address=address,
public_key=public_key,
check_sum=check_sum,
is_valid=True,
)
@classmethod
def parse(cls, address: str) -> typing.Self:
# Pad address so it's a valid b32 string
padded_address = address + (6 * "=")
if not (len(address) == ENCODED_ADDRESS_LENGTH and valid_base32(padded_address)):
return cls(address)
address_bytes = base64.b32decode(padded_address)
if len(address_bytes) != PUBLIC_KEY_HASH_LENGTH + ADDRESS_CHECKSUM_LENGTH:
return cls(address)
public_key_hash = address_bytes[:PUBLIC_KEY_HASH_LENGTH]
check_sum = address_bytes[PUBLIC_KEY_HASH_LENGTH:]
verified_address = cls.from_public_key(public_key_hash)
return cls(
address=address,
public_key=public_key_hash,
check_sum=check_sum,
is_valid=verified_address.check_sum == check_sum,
)
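# Illustrative sketch (not part of the original source): Address round-trips between a 32-byte
# public key and its base32 string form with checksum, while malformed strings parse as invalid.
def _demo_address_roundtrip() -> None:
    addr = Address.from_public_key(b"\x00" * 32)
    parsed = Address.parse(addr.address)
    assert parsed.is_valid and parsed.public_key == b"\x00" * 32
    assert not Address.parse("not an address").is_valid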
def valid_base32(s: str) -> bool:
"""check if s is a valid base32 encoding string and fits into AVM bytes type"""
try:
value = base64.b32decode(s)
except ValueError:
return False
return valid_bytes(value)
# regex from PyTEAL, appears to be RFC-4648
# ^(?:[A-Z2-7]{8})*(?:([A-Z2-7]{2}([=]{6})?)|([A-Z2-7]{4}([=]{4})?)|([A-Z2-7]{5}([=]{3})?)|([A-Z2-7]{7}([=]{1})?))? # noqa: E501
def valid_base16(s: str) -> bool:
try:
value = base64.b16decode(s)
except ValueError:
return False
return valid_bytes(value)
def valid_base64(s: str) -> bool:
"""check if s is a valid base64 encoding string and fits into AVM bytes type"""
try:
value = base64.b64decode(s, validate=True)
except ValueError:
return False
return valid_bytes(value)
# regex from PyTEAL, appears to be RFC-4648
# ^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$
def valid_bytes(value: bytes) -> bool:
return len(value) <= MAX_BYTES_LENGTH
def valid_int64(value: int) -> bool:
return bool(0 <= value <= MAX_UINT64)
def valid_address(address: str) -> bool:
"""check if address is a valid address with checksum"""
return Address.parse(address).is_valid
def sha512_256_hash(value: bytes) -> bytes:
"""
Returns the SHA512/256 hash of a value. This is the hashing algorithm used
to generate address checksums
"""
from Cryptodome.Hash import SHA512
sha = SHA512.new(truncate="256")
sha.update(value)
return sha.digest()
def method_selector_hash(method_signature: str) -> bytes:
return sha512_256_hash(method_signature.encode("utf8"))[:4]
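# Illustrative sketch (not part of the original source): an ARC-4 method selector is the first
# four bytes of the SHA512/256 hash of the method signature string.
def _demo_method_selector() -> None:
    selector = method_selector_hash("add(uint64,uint64)uint64")
    assert len(selector) == 4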
def attrs_extend[T: attrs.AttrsInstance](
new_type: type[T], base_instance: attrs.AttrsInstance, **changes: object
) -> T:
"""Like attrs.evolve but allows creating a related type"""
base_type = type(base_instance)
old_type_fields = attrs.fields_dict(base_type)
new_type_fields = attrs.fields(new_type)
for a in new_type_fields:
if not a.init:
continue
attr_name = a.name # To deal with private attributes.
init_name = a.alias
if init_name not in changes and attr_name in old_type_fields:
changes[init_name] = getattr(base_instance, attr_name)
return new_type(**changes)
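# Illustrative sketch (not part of the original source): attrs_extend copies matching fields from
# a base instance into a related attrs class, with any extra fields supplied as keyword
# arguments. _DemoBase and _DemoExtended are hypothetical classes used only for this example.
def _demo_attrs_extend() -> None:
    @attrs.frozen
    class _DemoBase:
        x: int
        y: str

    @attrs.frozen
    class _DemoExtended(_DemoBase):
        z: bool

    base = _DemoBase(x=1, y="a")
    extended = attrs_extend(_DemoExtended, base, z=True)
    assert (extended.x, extended.y, extended.z) == (1, "a", True)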
@functools.cache
def make_path_relative_to(*, to: Path, path: Path, walk_up: bool = False) -> str:
with contextlib.suppress(ValueError):
path = path.relative_to(to, walk_up=walk_up)
return normalize_path(path)
def make_path_relative_to_cwd(path: Path) -> str:
return make_path_relative_to(to=Path.cwd(), path=path)
def unique[T](items: Iterable[T]) -> list[T]:
return list(dict.fromkeys(items))
class StableSet[T](MutableSet[T]):
__slots__ = ("_data",)
def __init__(self, *items: T) -> None:
self._data = dict.fromkeys(items)
@classmethod
def from_iter(cls, items: Iterable[T]) -> "StableSet[T]":
result = StableSet.__new__(StableSet)
result._data = dict.fromkeys(items) # noqa: SLF001
return result
def __eq__(self, other: object) -> bool:
if isinstance(other, StableSet):
return self._data.__eq__(other._data)
else:
return self._data.keys() == other
def __ne__(self, other: object) -> bool:
if isinstance(other, StableSet):
return self._data.__ne__(other._data)
else:
return self._data.keys() != other
def __contains__(self, x: object) -> bool:
return self._data.__contains__(x)
def __len__(self) -> int:
return self._data.__len__()
def __iter__(self) -> Iterator[T]:
return self._data.__iter__()
def add(self, value: T) -> None:
self._data[value] = None
def discard(self, value: T) -> None:
self._data.pop(value, None)
def intersection(self, other: Iterable[T]) -> "StableSet[T]":
result = StableSet.__new__(StableSet)
result._data = dict.fromkeys(k for k in self._data if k in other) # noqa: SLF001
return result
def __or__(self, other: Iterable[T]) -> "StableSet[T]": # type: ignore[override]
result = StableSet.__new__(StableSet)
if isinstance(other, StableSet):
other_data = other._data
else:
other_data = dict.fromkeys(other)
result._data = self._data | other_data
return result
def __ior__(self, other: Iterable[T]) -> typing.Self: # type: ignore[override]
if isinstance(other, StableSet):
other_data = other._data
else:
other_data = dict.fromkeys(other)
self._data |= other_data
return self
def __sub__(self, other: Set[T]) -> "StableSet[T]":
result = StableSet.__new__(StableSet)
if isinstance(other, StableSet):
data: Iterable[T] = self._data.keys() - other._data.keys()
else:
data = (k for k in self._data if k not in other)
result._data = dict.fromkeys(data)
return result
def __repr__(self) -> str:
return type(self).__name__ + "(" + ", ".join(map(repr, self._data)) + ")"
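# Illustrative sketch (not part of the original source): StableSet behaves like a set but
# preserves insertion order across union and difference operations.
def _demo_stable_set() -> None:
    s = StableSet(3, 1, 2)
    assert list(s) == [3, 1, 2]
    s |= [2, 5]
    assert list(s) == [3, 1, 2, 5]
    assert list(s - {1}) == [3, 2, 5]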
def lazy_setdefault[T, U](m: MutableMapping[T, U], /, key: T, default: Callable[[T], U]) -> U:
"""dict.setdefault, but with a callable"""
try:
return m[key]
except KeyError:
pass
value = default(key)
m[key] = value
return value
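# Illustrative sketch (not part of the original source): unlike dict.setdefault, the default is
# only computed when the key is missing.
def _demo_lazy_setdefault() -> None:
    cache: dict[str, str] = {}
    assert lazy_setdefault(cache, "k", str.upper) == "K"
    assert cache == {"k": "K"}
    # a second lookup returns the cached value without calling the factory
    assert lazy_setdefault(cache, "k", lambda _: "ignored") == "K"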
_INVERT_ORDERED_BINARY_OP = str.maketrans("<>", "><")
def invert_ordered_binary_op(op: str) -> str:
return op.translate(_INVERT_ORDERED_BINARY_OP)
def clamp(value: int, *, low: int, high: int) -> int:
if value < low:
return low
if value > high:
return high
return value
def bits_to_bytes(bit_size: int) -> int:
return int(math.ceil(bit_size / 8))
def round_bits_to_nearest_bytes(bit_size: int) -> int:
return bits_to_bytes(bit_size) * 8
@contextlib.contextmanager
def pushd(new_dir: Path) -> Iterator[None]:
orig_dir = Path.cwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(orig_dir)
def normalise_path_to_str(path: Path) -> str:
return str(path).replace("\\", "/")
def biguint_bytes_eval(value: int) -> bytes:
byte_length = math.ceil(value.bit_length() / 8.0)
assert byte_length <= 64, "Biguints must be 64 bytes or less"
big_uint_bytes = value.to_bytes(byteorder="big", length=byte_length)
return big_uint_bytes
def calculate_extra_program_pages(approval_program_length: int, clear_program_length: int) -> int:
total_bytes = approval_program_length + clear_program_length
return (total_bytes - 1) // MAX_APP_PAGE_SIZE
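# Illustrative sketch (not part of the original source): biguint values are encoded big-endian
# with no leading zero bytes, and extra pages are charged per MAX_APP_PAGE_SIZE (assumed here to
# be 2048, the mainnet value defined in algo_constants).
def _demo_program_sizes() -> None:
    assert biguint_bytes_eval(1) == b"\x01"
    assert biguint_bytes_eval(256) == b"\x01\x00"
    assert calculate_extra_program_pages(2048, 0) == 0
    assert calculate_extra_program_pages(2048, 1) == 1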
@typing.overload
def coalesce[T](arg1: T | None, arg2: T, /) -> T: ...
@typing.overload
def coalesce[T](arg1: T | None, arg2: T | None, arg3: T, /) -> T: ...
@typing.overload
def coalesce[T](*args: T | None) -> T | None: ...
def coalesce[T](*args: T | None) -> T | None:
"""Shorthand for `a if a is not None else b`, with eager evaluation as a tradeoff"""
# REFACTOR: if there's a better way to do the above overloads, we should.
# the problem is you can't have a positional argument after *args,
# and we want to take the last one's type separately
for arg in args:
if arg is not None:
return arg
return None
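# Illustrative sketch (not part of the original source): coalesce returns the first non-None
# argument, with all arguments evaluated eagerly.
def _demo_coalesce() -> None:
    assert coalesce(None, 2) == 2
    assert coalesce(1, 2) == 1
    assert coalesce(None, None) is None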
def positive_index[T](idx: int, seq: Sequence[T]) -> int:
return idx if idx >= 0 else len(seq) + idx
def set_add[T](set_: MutableSet[T], value: T) -> bool:
"""ensure item exists in a set, returning if it was added or not"""
added = value not in set_
set_.add(value)
return added
def set_remove[T](set_: MutableSet[T], value: T) -> bool:
removed = value in set_
set_.discard(value)
return removed
def normalize_path(path: Path) -> str:
return str(path).replace(os.sep, "/")
|
algorandfoundation/puya
|
src/puya/utils.py
|
Python
|
NOASSERTION
| 9,783 |
import pathlib
import sys
_VENDOR_PATH = pathlib.Path(__file__).parent / "_vendor"
sys.path.insert(0, str(_VENDOR_PATH))
|
algorandfoundation/puya
|
src/puyapy/__init__.py
|
Python
|
NOASSERTION
| 123 |
import argparse
import typing
from collections.abc import Sequence
from importlib.metadata import version
from pathlib import Path
from puya.algo_constants import MAINNET_AVM_VERSION, SUPPORTED_AVM_VERSIONS
from puya.log import LogLevel, configure_logging
from puya.options import LocalsCoalescingStrategy
from puyapy.compile import compile_to_teal
from puyapy.options import PuyaPyOptions
from puyapy.template import parse_template_key_value
def main() -> None:
parser = argparse.ArgumentParser(
prog="puyapy",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--version", action="version", version=f"%(prog)s {version('puyapy')}")
parser.add_argument(
"-O",
"--optimization-level",
type=int,
choices=[0, 1, 2],
default=1,
help="Set optimization level of output TEAL / AVM bytecode",
)
parser.add_argument(
"--output-teal",
action=argparse.BooleanOptionalAction,
default=True,
help="Output TEAL code",
)
parser.add_argument(
"--output-source-map",
action=argparse.BooleanOptionalAction,
default=True,
help="Output debug source maps",
)
parser.add_argument(
"--output-arc32",
action=argparse.BooleanOptionalAction,
default=True,
help="Output {contract}.arc32.json ARC-32 app spec file",
)
parser.add_argument(
"--output-arc56",
action=argparse.BooleanOptionalAction,
default=False,
help="Output {contract}.arc56.json ARC-56 app spec file",
)
parser.add_argument(
"--output-client",
action=argparse.BooleanOptionalAction,
default=False,
help="Output Algorand Python contract client for typed ARC4 ABI calls",
)
parser.add_argument(
"--out-dir", type=Path, help="Path for outputting artefacts", default=False
)
parser.add_argument(
"--log-level",
type=LogLevel.from_string,
choices=list(LogLevel),
default=LogLevel.info,
help="Minimum level to log to console",
)
parser.add_argument(
"-g", # -g chosen because it is the same option for debug in gcc
"--debug-level",
type=int,
choices=[0, 1, 2],
default=1,
help="Output debug information level, 0 = none, 1 = debug, 2 = reserved for future use",
)
parser.add_argument(
"--output-awst",
action=argparse.BooleanOptionalAction,
default=False,
help="Output parsed result of AWST",
)
parser.add_argument(
"--output-awst-json",
action=argparse.BooleanOptionalAction,
default=False,
help="Output parsed result of AWST as JSON",
)
parser.add_argument(
"--output-ssa-ir",
action=argparse.BooleanOptionalAction,
default=False,
help="Output IR (in SSA form) before optimisations",
)
parser.add_argument(
"--output-optimization-ir",
action=argparse.BooleanOptionalAction,
default=False,
help="Output IR after each optimization",
)
parser.add_argument(
"--output-destructured-ir",
action=argparse.BooleanOptionalAction,
default=False,
help="Output IR after SSA destructuring and before MIR",
)
parser.add_argument(
"--output-memory-ir",
action=argparse.BooleanOptionalAction,
default=False,
help="Output MIR before lowering to TealOps",
)
parser.add_argument(
"--output-bytecode",
action=argparse.BooleanOptionalAction,
default=False,
help="Output AVM bytecode",
)
parser.add_argument(
"--match-algod-bytecode",
action=_EmitDeprecated,
dest=argparse.SUPPRESS,
nargs=0,
help="Deprecated: When outputting bytecode, ensure bytecode matches algod output",
)
parser.add_argument(
"-T",
"--template-var",
dest="cli_template_definitions",
metavar="VAR=VALUE",
action=_ParseAndStoreTemplateVar,
default={},
nargs="+",
help="Define template vars for use when assembling via --output-bytecode"
" should be specified without the prefix (see --template-vars-prefix), e.g."
' -T SOME_INT=1234 SOME_BYTES=0x1A2B SOME_BOOL=True SOME_STR=\\"hello\\"',
)
parser.add_argument(
"--template-vars-prefix",
help="Define the prefix to use with --template-var",
default="TMPL_",
)
parser.add_argument(
"--target-avm-version",
type=int,
choices=SUPPORTED_AVM_VERSIONS,
default=MAINNET_AVM_VERSION,
)
parser.add_argument(
"--locals-coalescing-strategy",
type=LocalsCoalescingStrategy,
choices=list(LocalsCoalescingStrategy),
default=LocalsCoalescingStrategy.root_operand,
help=(
"Strategy choice for out-of-ssa local variable coalescing. "
"The best choice for your app is best determined through experimentation"
),
)
parser.add_argument("paths", type=Path, nargs="+", metavar="PATH")
namespace = parser.parse_args()
options = PuyaPyOptions(**vars(namespace))
configure_logging(min_log_level=options.log_level)
compile_to_teal(options)
class _EmitDeprecated(argparse.Action):
@typing.override
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[typing.Any] | None,
option_string: str | None = None,
) -> None:
print(f"warning: {option_string} is deprecated and no longer does anything") # noqa: T201
class _ParseAndStoreTemplateVar(argparse.Action):
@typing.override
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[typing.Any] | None,
option_string: str | None = None,
) -> None:
mapping: dict[str, int | bytes] = dict(getattr(namespace, self.dest, {}))
lst = []
if isinstance(values, str):
lst = [values]
elif values:
for value in values:
assert isinstance(value, str)
lst.append(value)
for kv in lst:
try:
name, value = parse_template_key_value(kv)
except Exception as ex:
parser.error(str(ex))
mapping[name] = value
setattr(namespace, self.dest, mapping)
if __name__ == "__main__":
main()
|
algorandfoundation/puya
|
src/puyapy/__main__.py
|
Python
|
NOASSERTION
| 6,637 |
# This page intentionally left blank
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/__init__.py
|
Python
|
NOASSERTION
| 37 |
"""Mypy type checker command line tool."""
from __future__ import annotations
import os
import sys
import traceback
from mypy.main import main, process_options
from mypy.util import FancyFormatter
def console_entry() -> None:
try:
main()
sys.stdout.flush()
sys.stderr.flush()
except BrokenPipeError:
# Python flushes standard streams on exit; redirect remaining output
# to devnull to avoid another BrokenPipeError at shutdown
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
sys.exit(2)
except KeyboardInterrupt:
_, options = process_options(args=sys.argv[1:])
if options.show_traceback:
sys.stdout.write(traceback.format_exc())
formatter = FancyFormatter(sys.stdout, sys.stderr, False)
msg = "Interrupted\n"
sys.stdout.write(formatter.style(msg, color="red", bold=True))
sys.stdout.flush()
sys.stderr.flush()
sys.exit(2)
if __name__ == "__main__":
console_entry()
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/__main__.py
|
Python
|
NOASSERTION
| 1,061 |
"""This module makes it possible to use mypy as part of a Python application.
Since mypy still changes, the API was kept utterly simple and non-intrusive.
It just mimics command line activation without starting a new interpreter.
So the normal docs about the mypy command line apply.
Changes in the command line version of mypy will be immediately usable.
Just import this module and then call the 'run' function with a parameter of
type List[str], containing what normally would have been the command line
arguments to mypy.
Function 'run' returns a Tuple[str, str, int], namely
(<normal_report>, <error_report>, <exit_status>),
in which <normal_report> is what mypy normally writes to sys.stdout,
<error_report> is what mypy normally writes to sys.stderr and exit_status is
the exit status mypy normally returns to the operating system.
Any pretty formatting is left to the caller.
The 'run_dmypy' function is similar, but instead mimics invocation of
dmypy. Note that run_dmypy is not thread-safe and modifies sys.stdout
and sys.stderr during its invocation.
Note that these APIs don't support incremental generation of error
messages.
Trivial example of code using this module:
import sys
from mypy import api
result = api.run(sys.argv[1:])
if result[0]:
print('\nType checking report:\n')
print(result[0]) # stdout
if result[1]:
print('\nError report:\n')
print(result[1]) # stderr
print('\nExit status:', result[2])
"""
from __future__ import annotations
import sys
from io import StringIO
from typing import Callable, TextIO
def _run(main_wrapper: Callable[[TextIO, TextIO], None]) -> tuple[str, str, int]:
stdout = StringIO()
stderr = StringIO()
try:
main_wrapper(stdout, stderr)
exit_status = 0
except SystemExit as system_exit:
assert isinstance(system_exit.code, int)
exit_status = system_exit.code
return stdout.getvalue(), stderr.getvalue(), exit_status
def run(args: list[str]) -> tuple[str, str, int]:
# Lazy import to avoid needing to import all of mypy to call run_dmypy
from mypy.main import main
return _run(
lambda stdout, stderr: main(args=args, stdout=stdout, stderr=stderr, clean_exit=True)
)
def run_dmypy(args: list[str]) -> tuple[str, str, int]:
from mypy.dmypy.client import main
# A bunch of effort has been put into threading stdout and stderr
# through the main API to avoid the threadsafety problems of
# modifying sys.stdout/sys.stderr, but that hasn't been done for
# the dmypy client, so we just do the non-threadsafe thing.
def f(stdout: TextIO, stderr: TextIO) -> None:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = stdout
sys.stderr = stderr
main(args)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
return _run(f)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/api.py
|
Python
|
NOASSERTION
| 2,922 |
from __future__ import annotations
from typing import Callable, Iterable, Sequence
import mypy.subtypes
from mypy.erasetype import erase_typevars
from mypy.expandtype import expand_type
from mypy.nodes import Context, TypeInfo
from mypy.type_visitor import TypeTranslator
from mypy.typeops import get_all_type_vars
from mypy.types import (
AnyType,
CallableType,
Instance,
Parameters,
ParamSpecFlavor,
ParamSpecType,
PartialType,
ProperType,
Type,
TypeAliasType,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
UninhabitedType,
UnpackType,
get_proper_type,
remove_dups,
)
def get_target_type(
tvar: TypeVarLikeType,
type: Type,
callable: CallableType,
report_incompatible_typevar_value: Callable[[CallableType, Type, str, Context], None],
context: Context,
skip_unsatisfied: bool,
) -> Type | None:
p_type = get_proper_type(type)
if isinstance(p_type, UninhabitedType) and tvar.has_default():
return tvar.default
if isinstance(tvar, ParamSpecType):
return type
if isinstance(tvar, TypeVarTupleType):
return type
assert isinstance(tvar, TypeVarType)
values = tvar.values
if values:
if isinstance(p_type, AnyType):
return type
if isinstance(p_type, TypeVarType) and p_type.values:
# Allow substituting T1 for T if every allowed value of T1
# is also a legal value of T.
if all(any(mypy.subtypes.is_same_type(v, v1) for v in values) for v1 in p_type.values):
return type
matching = []
for value in values:
if mypy.subtypes.is_subtype(type, value):
matching.append(value)
if matching:
best = matching[0]
            # If there is more than one matching value, we select the narrowest
for match in matching[1:]:
if mypy.subtypes.is_subtype(match, best):
best = match
return best
if skip_unsatisfied:
return None
report_incompatible_typevar_value(callable, type, tvar.name, context)
else:
upper_bound = tvar.upper_bound
if tvar.name == "Self":
# Internally constructed Self-types contain class type variables in upper bound,
# so we need to erase them to avoid false positives. This is safe because we do
# not support type variables in upper bounds of user defined types.
upper_bound = erase_typevars(upper_bound)
if not mypy.subtypes.is_subtype(type, upper_bound):
if skip_unsatisfied:
return None
report_incompatible_typevar_value(callable, type, tvar.name, context)
return type
def apply_generic_arguments(
callable: CallableType,
orig_types: Sequence[Type | None],
report_incompatible_typevar_value: Callable[[CallableType, Type, str, Context], None],
context: Context,
skip_unsatisfied: bool = False,
) -> CallableType:
"""Apply generic type arguments to a callable type.
For example, applying [int] to 'def [T] (T) -> T' results in
'def (int) -> int'.
Note that each type can be None; in this case, it will not be applied.
If `skip_unsatisfied` is True, then just skip the types that don't satisfy type variable
bound or constraints, instead of giving an error.
"""
tvars = callable.variables
assert len(orig_types) <= len(tvars)
# Check that inferred type variable values are compatible with allowed
# values and bounds. Also, promote subtype values to allowed values.
# Create a map from type variable id to target type.
id_to_type: dict[TypeVarId, Type] = {}
for tvar, type in zip(tvars, orig_types):
assert not isinstance(type, PartialType), "Internal error: must never apply partial type"
if type is None:
continue
target_type = get_target_type(
tvar, type, callable, report_incompatible_typevar_value, context, skip_unsatisfied
)
if target_type is not None:
id_to_type[tvar.id] = target_type
# TODO: validate arg_kinds/arg_names for ParamSpec and TypeVarTuple replacements,
# not just type variable bounds above.
param_spec = callable.param_spec()
if param_spec is not None:
nt = id_to_type.get(param_spec.id)
if nt is not None:
# ParamSpec expansion is special-cased, so we need to always expand callable
# as a whole, not expanding arguments individually.
callable = expand_type(callable, id_to_type)
assert isinstance(callable, CallableType)
return callable.copy_modified(
variables=[tv for tv in tvars if tv.id not in id_to_type]
)
# Apply arguments to argument types.
var_arg = callable.var_arg()
if var_arg is not None and isinstance(var_arg.typ, UnpackType):
# Same as for ParamSpec, callable with variadic types needs to be expanded as a whole.
callable = expand_type(callable, id_to_type)
assert isinstance(callable, CallableType)
return callable.copy_modified(variables=[tv for tv in tvars if tv.id not in id_to_type])
else:
callable = callable.copy_modified(
arg_types=[expand_type(at, id_to_type) for at in callable.arg_types]
)
# Apply arguments to TypeGuard and TypeIs if any.
if callable.type_guard is not None:
type_guard = expand_type(callable.type_guard, id_to_type)
else:
type_guard = None
if callable.type_is is not None:
type_is = expand_type(callable.type_is, id_to_type)
else:
type_is = None
# The callable may retain some type vars if only some were applied.
# TODO: move apply_poly() logic here when new inference
# becomes universally used (i.e. in all passes + in unification).
# With this new logic we can actually *add* some new free variables.
remaining_tvars: list[TypeVarLikeType] = []
for tv in tvars:
if tv.id in id_to_type:
continue
if not tv.has_default():
remaining_tvars.append(tv)
continue
# TypeVarLike isn't in id_to_type mapping.
# Only expand the TypeVar default here.
typ = expand_type(tv, id_to_type)
assert isinstance(typ, TypeVarLikeType)
remaining_tvars.append(typ)
return callable.copy_modified(
ret_type=expand_type(callable.ret_type, id_to_type),
variables=remaining_tvars,
type_guard=type_guard,
type_is=type_is,
)
def apply_poly(tp: CallableType, poly_tvars: Sequence[TypeVarLikeType]) -> CallableType | None:
"""Make free type variables generic in the type if possible.
    This will translate the type `tp`, trying to create valid bindings for
    the type variables `poly_tvars` as it traverses the type. This follows the same rules
as we do during semantic analysis phase, examples:
* Callable[Callable[[T], T], T] -> def [T] (def (T) -> T) -> T
* Callable[[], Callable[[T], T]] -> def () -> def [T] (T -> T)
* List[T] -> None (not possible)
"""
try:
return tp.copy_modified(
arg_types=[t.accept(PolyTranslator(poly_tvars)) for t in tp.arg_types],
ret_type=tp.ret_type.accept(PolyTranslator(poly_tvars)),
variables=[],
)
except PolyTranslationError:
return None
class PolyTranslationError(Exception):
pass
class PolyTranslator(TypeTranslator):
"""Make free type variables generic in the type if possible.
See docstring for apply_poly() for details.
"""
def __init__(
self,
poly_tvars: Iterable[TypeVarLikeType],
bound_tvars: frozenset[TypeVarLikeType] = frozenset(),
seen_aliases: frozenset[TypeInfo] = frozenset(),
) -> None:
super().__init__()
self.poly_tvars = set(poly_tvars)
# This is a simplified version of TypeVarScope used during semantic analysis.
self.bound_tvars = bound_tvars
self.seen_aliases = seen_aliases
def collect_vars(self, t: CallableType | Parameters) -> list[TypeVarLikeType]:
found_vars = []
for arg in t.arg_types:
for tv in get_all_type_vars(arg):
if isinstance(tv, ParamSpecType):
normalized: TypeVarLikeType = tv.copy_modified(
flavor=ParamSpecFlavor.BARE, prefix=Parameters([], [], [])
)
else:
normalized = tv
if normalized in self.poly_tvars and normalized not in self.bound_tvars:
found_vars.append(normalized)
return remove_dups(found_vars)
def visit_callable_type(self, t: CallableType) -> Type:
found_vars = self.collect_vars(t)
self.bound_tvars |= set(found_vars)
result = super().visit_callable_type(t)
self.bound_tvars -= set(found_vars)
assert isinstance(result, ProperType) and isinstance(result, CallableType)
result.variables = list(result.variables) + found_vars
return result
def visit_type_var(self, t: TypeVarType) -> Type:
if t in self.poly_tvars and t not in self.bound_tvars:
raise PolyTranslationError()
return super().visit_type_var(t)
def visit_param_spec(self, t: ParamSpecType) -> Type:
if t in self.poly_tvars and t not in self.bound_tvars:
raise PolyTranslationError()
return super().visit_param_spec(t)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type:
if t in self.poly_tvars and t not in self.bound_tvars:
raise PolyTranslationError()
return super().visit_type_var_tuple(t)
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
if not t.args:
return t.copy_modified()
if not t.is_recursive:
return get_proper_type(t).accept(self)
# We can't handle polymorphic application for recursive generic aliases
# without risking an infinite recursion, just give up for now.
raise PolyTranslationError()
def visit_instance(self, t: Instance) -> Type:
if t.type.has_param_spec_type:
# We need this special-casing to preserve the possibility to store a
# generic function in an instance type. Things like
# forall T . Foo[[x: T], T]
            # are not really expressible in the current type system, but this looks like
# a useful feature, so let's keep it.
param_spec_index = next(
i for (i, tv) in enumerate(t.type.defn.type_vars) if isinstance(tv, ParamSpecType)
)
p = get_proper_type(t.args[param_spec_index])
if isinstance(p, Parameters):
found_vars = self.collect_vars(p)
self.bound_tvars |= set(found_vars)
new_args = [a.accept(self) for a in t.args]
self.bound_tvars -= set(found_vars)
repl = new_args[param_spec_index]
assert isinstance(repl, ProperType) and isinstance(repl, Parameters)
repl.variables = list(repl.variables) + list(found_vars)
return t.copy_modified(args=new_args)
# There is the same problem with callback protocols as with aliases
# (callback protocols are essentially more flexible aliases to callables).
if t.args and t.type.is_protocol and t.type.protocol_members == ["__call__"]:
if t.type in self.seen_aliases:
raise PolyTranslationError()
call = mypy.subtypes.find_member("__call__", t, t, is_operator=True)
assert call is not None
return call.accept(
PolyTranslator(self.poly_tvars, self.bound_tvars, self.seen_aliases | {t.type})
)
return super().visit_instance(t)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/applytype.py
|
Python
|
NOASSERTION
| 12,022 |
"""Utilities for mapping between actual and formal arguments (and their types)."""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, Sequence
from mypy import nodes
from mypy.maptype import map_instance_to_supertype
from mypy.types import (
AnyType,
Instance,
ParamSpecType,
TupleType,
Type,
TypedDictType,
TypeOfAny,
TypeVarTupleType,
UnpackType,
get_proper_type,
)
if TYPE_CHECKING:
from mypy.infer import ArgumentInferContext
def map_actuals_to_formals(
actual_kinds: list[nodes.ArgKind],
actual_names: Sequence[str | None] | None,
formal_kinds: list[nodes.ArgKind],
formal_names: Sequence[str | None],
actual_arg_type: Callable[[int], Type],
) -> list[list[int]]:
"""Calculate mapping between actual (caller) args and formals.
The result contains a list of caller argument indexes mapping to each
callee argument index, indexed by callee index.
    The actual_arg_type argument should evaluate to the type of the actual
    argument with the given index.
"""
nformals = len(formal_kinds)
formal_to_actual: list[list[int]] = [[] for i in range(nformals)]
ambiguous_actual_kwargs: list[int] = []
fi = 0
for ai, actual_kind in enumerate(actual_kinds):
if actual_kind == nodes.ARG_POS:
if fi < nformals:
if not formal_kinds[fi].is_star():
formal_to_actual[fi].append(ai)
fi += 1
elif formal_kinds[fi] == nodes.ARG_STAR:
formal_to_actual[fi].append(ai)
elif actual_kind == nodes.ARG_STAR:
# We need to know the actual type to map varargs.
actualt = get_proper_type(actual_arg_type(ai))
if isinstance(actualt, TupleType):
# A tuple actual maps to a fixed number of formals.
for _ in range(len(actualt.items)):
if fi < nformals:
if formal_kinds[fi] != nodes.ARG_STAR2:
formal_to_actual[fi].append(ai)
else:
break
if formal_kinds[fi] != nodes.ARG_STAR:
fi += 1
else:
# Assume that it is an iterable (if it isn't, there will be
# an error later).
while fi < nformals:
if formal_kinds[fi].is_named(star=True):
break
else:
formal_to_actual[fi].append(ai)
if formal_kinds[fi] == nodes.ARG_STAR:
break
fi += 1
elif actual_kind.is_named():
assert actual_names is not None, "Internal error: named kinds without names given"
name = actual_names[ai]
if name in formal_names:
formal_to_actual[formal_names.index(name)].append(ai)
elif nodes.ARG_STAR2 in formal_kinds:
formal_to_actual[formal_kinds.index(nodes.ARG_STAR2)].append(ai)
else:
assert actual_kind == nodes.ARG_STAR2
actualt = get_proper_type(actual_arg_type(ai))
if isinstance(actualt, TypedDictType):
for name in actualt.items:
if name in formal_names:
formal_to_actual[formal_names.index(name)].append(ai)
elif nodes.ARG_STAR2 in formal_kinds:
formal_to_actual[formal_kinds.index(nodes.ARG_STAR2)].append(ai)
else:
# We don't exactly know which **kwargs are provided by the
# caller, so we'll defer until all the other unambiguous
# actuals have been processed
ambiguous_actual_kwargs.append(ai)
if ambiguous_actual_kwargs:
# Assume the ambiguous kwargs will fill the remaining arguments.
#
# TODO: If there are also tuple varargs, we might be missing some potential
# matches if the tuple was short enough to not match everything.
unmatched_formals = [
fi
for fi in range(nformals)
if (
formal_names[fi]
and (
not formal_to_actual[fi]
or actual_kinds[formal_to_actual[fi][0]] == nodes.ARG_STAR
)
and formal_kinds[fi] != nodes.ARG_STAR
)
or formal_kinds[fi] == nodes.ARG_STAR2
]
for ai in ambiguous_actual_kwargs:
for fi in unmatched_formals:
formal_to_actual[fi].append(ai)
return formal_to_actual
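# Editorial illustration (not part of the original module): a minimal sketch of what
# map_actuals_to_formals computes for a purely positional call. The helper name
# `_demo_map_positional_call` is hypothetical and is never invoked by mypy itself.
def _demo_map_positional_call() -> list[list[int]]:
    # Callee signature: def f(x, y, *args); call site: f(1, 2, 3, 4).
    actual_kinds = [nodes.ARG_POS] * 4
    formal_kinds = [nodes.ARG_POS, nodes.ARG_POS, nodes.ARG_STAR]
    formal_names = ["x", "y", "args"]
    # The type callback is only consulted for *args/**kwargs actuals, so a constant
    # Any-returning placeholder is sufficient for this all-positional example.
    mapping = map_actuals_to_formals(
        actual_kinds, None, formal_kinds, formal_names, lambda i: AnyType(TypeOfAny.special_form)
    )
    # Expected result: [[0], [1], [2, 3]] -- caller argument indexes grouped by
    # the formal (callee) index they map to.
    return mapping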
def map_formals_to_actuals(
actual_kinds: list[nodes.ArgKind],
actual_names: Sequence[str | None] | None,
formal_kinds: list[nodes.ArgKind],
formal_names: list[str | None],
actual_arg_type: Callable[[int], Type],
) -> list[list[int]]:
"""Calculate the reverse mapping of map_actuals_to_formals."""
formal_to_actual = map_actuals_to_formals(
actual_kinds, actual_names, formal_kinds, formal_names, actual_arg_type
)
# Now reverse the mapping.
actual_to_formal: list[list[int]] = [[] for _ in actual_kinds]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
actual_to_formal[actual].append(formal)
return actual_to_formal
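# Editorial note (illustrative only): this is the inverse view of the mapping above.
# For the positional example sketched in _demo_map_positional_call, the reverse
# mapping would be [[0], [1], [2], [2]] -- one entry per caller argument, listing
# the formal index it feeds.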
class ArgTypeExpander:
"""Utility class for mapping actual argument types to formal arguments.
One of the main responsibilities is to expand caller tuple *args and TypedDict
**kwargs, and to keep track of which tuple/TypedDict items have already been
consumed.
Example:
def f(x: int, *args: str) -> None: ...
f(*(1, 'x', 1.1))
We'd call expand_actual_type three times:
1. The first call would provide 'int' as the actual type of 'x' (from '1').
2. The second call would provide 'str' as one of the actual types for '*args'.
      3. The third call would provide 'float' as one of the actual types for '*args'.
A single instance can process all the arguments for a single call. Each call
needs a separate instance since instances have per-call state.
"""
def __init__(self, context: ArgumentInferContext) -> None:
# Next tuple *args index to use.
self.tuple_index = 0
# Keyword arguments in TypedDict **kwargs used.
self.kwargs_used: set[str] = set()
# Type context for `*` and `**` arg kinds.
self.context = context
def expand_actual_type(
self,
actual_type: Type,
actual_kind: nodes.ArgKind,
formal_name: str | None,
formal_kind: nodes.ArgKind,
allow_unpack: bool = False,
) -> Type:
"""Return the actual (caller) type(s) of a formal argument with the given kinds.
If the actual argument is a tuple *args, return the next individual tuple item that
maps to the formal arg.
If the actual argument is a TypedDict **kwargs, return the next matching typed dict
value type based on formal argument name and kind.
This is supposed to be called for each formal, in order. Call multiple times per
formal if multiple actuals map to a formal.
"""
original_actual = actual_type
actual_type = get_proper_type(actual_type)
if actual_kind == nodes.ARG_STAR:
if isinstance(actual_type, TypeVarTupleType):
# This code path is hit when *Ts is passed to a callable and various
# special-handling didn't catch this. The best thing we can do is to use
# the upper bound.
actual_type = get_proper_type(actual_type.upper_bound)
if isinstance(actual_type, Instance) and actual_type.args:
from mypy.subtypes import is_subtype
if is_subtype(actual_type, self.context.iterable_type):
return map_instance_to_supertype(
actual_type, self.context.iterable_type.type
).args[0]
else:
# We cannot properly unpack anything other
# than `Iterable` type with `*`.
# Just return `Any`, other parts of code would raise
# a different error for improper use.
return AnyType(TypeOfAny.from_error)
elif isinstance(actual_type, TupleType):
# Get the next tuple item of a tuple *arg.
if self.tuple_index >= len(actual_type.items):
# Exhausted a tuple -- continue to the next *args.
self.tuple_index = 1
else:
self.tuple_index += 1
item = actual_type.items[self.tuple_index - 1]
if isinstance(item, UnpackType) and not allow_unpack:
                    # An unpack item without special handling; use the upper bound as above.
unpacked = get_proper_type(item.type)
if isinstance(unpacked, TypeVarTupleType):
fallback = get_proper_type(unpacked.upper_bound)
else:
fallback = unpacked
assert (
isinstance(fallback, Instance)
and fallback.type.fullname == "builtins.tuple"
)
item = fallback.args[0]
return item
elif isinstance(actual_type, ParamSpecType):
# ParamSpec is valid in *args but it can't be unpacked.
return actual_type
else:
return AnyType(TypeOfAny.from_error)
elif actual_kind == nodes.ARG_STAR2:
from mypy.subtypes import is_subtype
if isinstance(actual_type, TypedDictType):
if formal_kind != nodes.ARG_STAR2 and formal_name in actual_type.items:
# Lookup type based on keyword argument name.
assert formal_name is not None
else:
# Pick an arbitrary item if no specified keyword is expected.
formal_name = (set(actual_type.items.keys()) - self.kwargs_used).pop()
self.kwargs_used.add(formal_name)
return actual_type.items[formal_name]
elif (
isinstance(actual_type, Instance)
and len(actual_type.args) > 1
and is_subtype(actual_type, self.context.mapping_type)
):
# Only `Mapping` type can be unpacked with `**`.
# Other types will produce an error somewhere else.
return map_instance_to_supertype(actual_type, self.context.mapping_type.type).args[
1
]
elif isinstance(actual_type, ParamSpecType):
# ParamSpec is valid in **kwargs but it can't be unpacked.
return actual_type
else:
return AnyType(TypeOfAny.from_error)
else:
# No translation for other kinds -- 1:1 mapping.
return original_actual
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/argmap.py
|
Python
|
NOASSERTION
| 11,300 |
from __future__ import annotations
from collections import defaultdict
from contextlib import contextmanager
from typing import DefaultDict, Iterator, List, Optional, Tuple, Union, cast
from typing_extensions import TypeAlias as _TypeAlias
from mypy.erasetype import remove_instance_last_known_values
from mypy.join import join_simple
from mypy.literals import Key, literal, literal_hash, subkeys
from mypy.nodes import Expression, IndexExpr, MemberExpr, NameExpr, RefExpr, TypeInfo, Var
from mypy.subtypes import is_same_type, is_subtype
from mypy.types import (
AnyType,
Instance,
NoneType,
PartialType,
ProperType,
TupleType,
Type,
TypeOfAny,
TypeType,
UnionType,
UnpackType,
find_unpack_in_list,
get_proper_type,
)
from mypy.typevars import fill_typevars_with_any
BindableExpression: _TypeAlias = Union[IndexExpr, MemberExpr, NameExpr]
class Frame:
"""A Frame represents a specific point in the execution of a program.
It carries information about the current types of expressions at
that point, arising either from assignments to those expressions
or the result of isinstance checks. It also records whether it is
possible to reach that point at all.
This information is not copied into a new Frame when it is pushed
onto the stack, so a given Frame only has information about types
that were assigned in that frame.
"""
def __init__(self, id: int, conditional_frame: bool = False) -> None:
self.id = id
self.types: dict[Key, Type] = {}
self.unreachable = False
self.conditional_frame = conditional_frame
self.suppress_unreachable_warnings = False
def __repr__(self) -> str:
return f"Frame({self.id}, {self.types}, {self.unreachable}, {self.conditional_frame})"
Assigns = DefaultDict[Expression, List[Tuple[Type, Optional[Type]]]]
class ConditionalTypeBinder:
"""Keep track of conditional types of variables.
NB: Variables are tracked by literal expression, so it is possible
to confuse the binder; for example,
```
class A:
a: Union[int, str] = None
x = A()
lst = [x]
reveal_type(x.a) # Union[int, str]
x.a = 1
reveal_type(x.a) # int
reveal_type(lst[0].a) # Union[int, str]
lst[0].a = 'a'
reveal_type(x.a) # int
reveal_type(lst[0].a) # str
```
"""
# Stored assignments for situations with tuple/list lvalue and rvalue of union type.
# This maps an expression to a list of bound types for every item in the union type.
type_assignments: Assigns | None = None
def __init__(self) -> None:
self.next_id = 1
# The stack of frames currently used. These map
# literal_hash(expr) -- literals like 'foo.bar' --
# to types. The last element of this list is the
# top-most, current frame. Each earlier element
# records the state as of when that frame was last
# on top of the stack.
self.frames = [Frame(self._get_id())]
# For frames higher in the stack, we record the set of
# Frames that can escape there, either by falling off
# the end of the frame or by a loop control construct
# or raised exception. The last element of self.frames
# has no corresponding element in this list.
self.options_on_return: list[list[Frame]] = []
# Maps literal_hash(expr) to get_declaration(expr)
# for every expr stored in the binder
self.declarations: dict[Key, Type | None] = {}
# Set of other keys to invalidate if a key is changed, e.g. x -> {x.a, x[0]}
# Whenever a new key (e.g. x.a.b) is added, we update this
self.dependencies: dict[Key, set[Key]] = {}
# Whether the last pop changed the newly top frame on exit
self.last_pop_changed = False
self.try_frames: set[int] = set()
self.break_frames: list[int] = []
self.continue_frames: list[int] = []
def _get_id(self) -> int:
self.next_id += 1
return self.next_id
def _add_dependencies(self, key: Key, value: Key | None = None) -> None:
if value is None:
value = key
else:
self.dependencies.setdefault(key, set()).add(value)
for elt in subkeys(key):
self._add_dependencies(elt, value)
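    # Editorial note (illustrative only): the net effect is that self.dependencies maps
    # each subkey to the set of larger keys built on top of it. For example, after put()
    # registers the key for `x.a.b`, the entries for `x` and `x.a` both gain `x.a.b`, so
    # invalidate_dependencies() on `x` can cleanse the derived key.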
def push_frame(self, conditional_frame: bool = False) -> Frame:
"""Push a new frame into the binder."""
f = Frame(self._get_id(), conditional_frame)
self.frames.append(f)
self.options_on_return.append([])
return f
def _put(self, key: Key, type: Type, index: int = -1) -> None:
self.frames[index].types[key] = type
def _get(self, key: Key, index: int = -1) -> Type | None:
if index < 0:
index += len(self.frames)
for i in range(index, -1, -1):
if key in self.frames[i].types:
return self.frames[i].types[key]
return None
def put(self, expr: Expression, typ: Type) -> None:
if not isinstance(expr, (IndexExpr, MemberExpr, NameExpr)):
return
if not literal(expr):
return
key = literal_hash(expr)
assert key is not None, "Internal error: binder tried to put non-literal"
if key not in self.declarations:
self.declarations[key] = get_declaration(expr)
self._add_dependencies(key)
self._put(key, typ)
def unreachable(self) -> None:
self.frames[-1].unreachable = True
def suppress_unreachable_warnings(self) -> None:
self.frames[-1].suppress_unreachable_warnings = True
def get(self, expr: Expression) -> Type | None:
key = literal_hash(expr)
assert key is not None, "Internal error: binder tried to get non-literal"
return self._get(key)
def is_unreachable(self) -> bool:
# TODO: Copy the value of unreachable into new frames to avoid
# this traversal on every statement?
return any(f.unreachable for f in self.frames)
def is_unreachable_warning_suppressed(self) -> bool:
return any(f.suppress_unreachable_warnings for f in self.frames)
def cleanse(self, expr: Expression) -> None:
"""Remove all references to a Node from the binder."""
key = literal_hash(expr)
assert key is not None, "Internal error: binder tried cleanse non-literal"
self._cleanse_key(key)
def _cleanse_key(self, key: Key) -> None:
"""Remove all references to a key from the binder."""
for frame in self.frames:
if key in frame.types:
del frame.types[key]
def update_from_options(self, frames: list[Frame]) -> bool:
"""Update the frame to reflect that each key will be updated
as in one of the frames. Return whether any item changes.
If a key is declared as AnyType, only update it if all the
options are the same.
"""
frames = [f for f in frames if not f.unreachable]
changed = False
keys = {key for f in frames for key in f.types}
for key in keys:
current_value = self._get(key)
resulting_values = [f.types.get(key, current_value) for f in frames]
if any(x is None for x in resulting_values):
# We didn't know anything about key before
# (current_value must be None), and we still don't
# know anything about key in at least one possible frame.
continue
type = resulting_values[0]
assert type is not None
declaration_type = get_proper_type(self.declarations.get(key))
if isinstance(declaration_type, AnyType):
# At this point resulting values can't contain None, see continue above
if not all(is_same_type(type, cast(Type, t)) for t in resulting_values[1:]):
type = AnyType(TypeOfAny.from_another_any, source_any=declaration_type)
else:
for other in resulting_values[1:]:
assert other is not None
type = join_simple(self.declarations[key], type, other)
# Try simplifying resulting type for unions involving variadic tuples.
# Technically, everything is still valid without this step, but if we do
# not do this, this may create long unions after exiting an if check like:
# x: tuple[int, ...]
# if len(x) < 10:
# ...
# We want the type of x to be tuple[int, ...] after this block (if it is
# still equivalent to such type).
if isinstance(type, UnionType):
type = collapse_variadic_union(type)
if isinstance(type, ProperType) and isinstance(type, UnionType):
# Simplify away any extra Any's that were added to the declared
# type when popping a frame.
simplified = UnionType.make_union(
[t for t in type.items if not isinstance(get_proper_type(t), AnyType)]
)
if simplified == self.declarations[key]:
type = simplified
if current_value is None or not is_same_type(type, current_value):
self._put(key, type)
changed = True
self.frames[-1].unreachable = not frames
return changed
def pop_frame(self, can_skip: bool, fall_through: int) -> Frame:
"""Pop a frame and return it.
See frame_context() for documentation of fall_through.
"""
if fall_through > 0:
self.allow_jump(-fall_through)
result = self.frames.pop()
options = self.options_on_return.pop()
if can_skip:
options.insert(0, self.frames[-1])
self.last_pop_changed = self.update_from_options(options)
return result
@contextmanager
def accumulate_type_assignments(self) -> Iterator[Assigns]:
"""Push a new map to collect assigned types in multiassign from union.
If this map is not None, actual binding is deferred until all items in
the union are processed (a union of collected items is later bound
manually by the caller).
"""
old_assignments = None
if self.type_assignments is not None:
old_assignments = self.type_assignments
self.type_assignments = defaultdict(list)
yield self.type_assignments
self.type_assignments = old_assignments
def assign_type(
self, expr: Expression, type: Type, declared_type: Type | None, restrict_any: bool = False
) -> None:
# We should erase last known value in binder, because if we are using it,
# it means that the target is not final, and therefore can't hold a literal.
type = remove_instance_last_known_values(type)
if self.type_assignments is not None:
# We are in a multiassign from union, defer the actual binding,
# just collect the types.
self.type_assignments[expr].append((type, declared_type))
return
if not isinstance(expr, (IndexExpr, MemberExpr, NameExpr)):
return
if not literal(expr):
return
self.invalidate_dependencies(expr)
if declared_type is None:
# Not sure why this happens. It seems to mainly happen in
# member initialization.
return
if not is_subtype(type, declared_type):
            # Pretty sure this only happens when there's a type error.
# Ideally this function wouldn't be called if the
# expression has a type error, though -- do other kinds of
# errors cause this function to get called at invalid
# times?
return
p_declared = get_proper_type(declared_type)
p_type = get_proper_type(type)
enclosing_type = get_proper_type(self.most_recent_enclosing_type(expr, type))
if isinstance(enclosing_type, AnyType) and not restrict_any:
# If x is Any and y is int, after x = y we do not infer that x is int.
# This could be changed.
# Instead, since we narrowed type from Any in a recent frame (probably an
# isinstance check), but now it is reassigned, we broaden back
# to Any (which is the most recent enclosing type)
self.put(expr, enclosing_type)
# As a special case, when assigning Any to a variable with a
# declared Optional type that has been narrowed to None,
# replace all the Nones in the declared Union type with Any.
# This overrides the normal behavior of ignoring Any assignments to variables
# in order to prevent false positives.
# (See discussion in #3526)
elif (
isinstance(p_type, AnyType)
and isinstance(p_declared, UnionType)
and any(isinstance(get_proper_type(item), NoneType) for item in p_declared.items)
and isinstance(
get_proper_type(self.most_recent_enclosing_type(expr, NoneType())), NoneType
)
):
# Replace any Nones in the union type with Any
new_items = [
type if isinstance(get_proper_type(item), NoneType) else item
for item in p_declared.items
]
self.put(expr, UnionType(new_items))
elif isinstance(p_type, AnyType) and not (
isinstance(p_declared, UnionType)
and any(isinstance(get_proper_type(item), AnyType) for item in p_declared.items)
):
# Assigning an Any value doesn't affect the type to avoid false negatives, unless
# there is an Any item in a declared union type.
self.put(expr, declared_type)
else:
self.put(expr, type)
for i in self.try_frames:
# XXX This should probably not copy the entire frame, but
# just copy this variable into a single stored frame.
self.allow_jump(i)
def invalidate_dependencies(self, expr: BindableExpression) -> None:
"""Invalidate knowledge of types that include expr, but not expr itself.
For example, when expr is foo.bar, invalidate foo.bar.baz.
It is overly conservative: it invalidates globally, including
in code paths unreachable from here.
"""
key = literal_hash(expr)
assert key is not None
for dep in self.dependencies.get(key, set()):
self._cleanse_key(dep)
def most_recent_enclosing_type(self, expr: BindableExpression, type: Type) -> Type | None:
type = get_proper_type(type)
if isinstance(type, AnyType):
return get_declaration(expr)
key = literal_hash(expr)
assert key is not None
enclosers = [get_declaration(expr)] + [
f.types[key] for f in self.frames if key in f.types and is_subtype(type, f.types[key])
]
return enclosers[-1]
def allow_jump(self, index: int) -> None:
# self.frames and self.options_on_return have different lengths
# so make sure the index is positive
if index < 0:
index += len(self.options_on_return)
frame = Frame(self._get_id())
for f in self.frames[index + 1 :]:
frame.types.update(f.types)
if f.unreachable:
frame.unreachable = True
self.options_on_return[index].append(frame)
def handle_break(self) -> None:
self.allow_jump(self.break_frames[-1])
self.unreachable()
def handle_continue(self) -> None:
self.allow_jump(self.continue_frames[-1])
self.unreachable()
@contextmanager
def frame_context(
self,
*,
can_skip: bool,
fall_through: int = 1,
break_frame: int = 0,
continue_frame: int = 0,
conditional_frame: bool = False,
try_frame: bool = False,
) -> Iterator[Frame]:
"""Return a context manager that pushes/pops frames on enter/exit.
If can_skip is True, control flow is allowed to bypass the
newly-created frame.
If fall_through > 0, then it will allow control flow that
falls off the end of the frame to escape to its ancestor
`fall_through` levels higher. Otherwise control flow ends
at the end of the frame.
If break_frame > 0, then 'break' statements within this frame
will jump out to the frame break_frame levels higher than the
frame created by this call to frame_context. Similarly for
continue_frame and 'continue' statements.
If try_frame is true, then execution is allowed to jump at any
point within the newly created frame (or its descendants) to
its parent (i.e., to the frame that was on top before this
call to frame_context).
After the context manager exits, self.last_pop_changed indicates
whether any types changed in the newly-topmost frame as a result
of popping this frame.
"""
assert len(self.frames) > 1
if break_frame:
self.break_frames.append(len(self.frames) - break_frame)
if continue_frame:
self.continue_frames.append(len(self.frames) - continue_frame)
if try_frame:
self.try_frames.add(len(self.frames) - 1)
new_frame = self.push_frame(conditional_frame)
if try_frame:
# An exception may occur immediately
self.allow_jump(-1)
yield new_frame
self.pop_frame(can_skip, fall_through)
if break_frame:
self.break_frames.pop()
if continue_frame:
self.continue_frames.pop()
if try_frame:
self.try_frames.remove(len(self.frames) - 1)
@contextmanager
def top_frame_context(self) -> Iterator[Frame]:
"""A variant of frame_context for use at the top level of
a namespace (module, function, or class).
"""
assert len(self.frames) == 1
yield self.push_frame()
self.pop_frame(True, 0)
assert len(self.frames) == 1
def get_declaration(expr: BindableExpression) -> Type | None:
if isinstance(expr, RefExpr):
if isinstance(expr.node, Var):
type = expr.node.type
if not isinstance(get_proper_type(type), PartialType):
return type
elif isinstance(expr.node, TypeInfo):
return TypeType(fill_typevars_with_any(expr.node))
return None
def collapse_variadic_union(typ: UnionType) -> Type:
"""Simplify a union involving variadic tuple if possible.
This will collapse a type like e.g.
tuple[X, Z] | tuple[X, Y, Z] | tuple[X, Y, Y, *tuple[Y, ...], Z]
back to
tuple[X, *tuple[Y, ...], Z]
which is equivalent, but much simpler form of the same type.
"""
tuple_items = []
other_items = []
for t in typ.items:
p_t = get_proper_type(t)
if isinstance(p_t, TupleType):
tuple_items.append(p_t)
else:
other_items.append(t)
if len(tuple_items) <= 1:
# This type cannot be simplified further.
return typ
tuple_items = sorted(tuple_items, key=lambda t: len(t.items))
first = tuple_items[0]
last = tuple_items[-1]
unpack_index = find_unpack_in_list(last.items)
if unpack_index is None:
return typ
unpack = last.items[unpack_index]
assert isinstance(unpack, UnpackType)
unpacked = get_proper_type(unpack.type)
if not isinstance(unpacked, Instance):
return typ
assert unpacked.type.fullname == "builtins.tuple"
suffix = last.items[unpack_index + 1 :]
# Check that first item matches the expected pattern and infer prefix.
if len(first.items) < len(suffix):
return typ
if suffix and first.items[-len(suffix) :] != suffix:
return typ
if suffix:
prefix = first.items[: -len(suffix)]
else:
prefix = first.items
# Check that all middle types match the expected pattern as well.
arg = unpacked.args[0]
for i, it in enumerate(tuple_items[1:-1]):
if it.items != prefix + [arg] * (i + 1) + suffix:
return typ
# Check the last item (the one with unpack), and choose an appropriate simplified type.
if last.items != prefix + [arg] * (len(typ.items) - 1) + [unpack] + suffix:
return typ
if len(first.items) == 0:
simplified: Type = unpacked.copy_modified()
else:
simplified = TupleType(prefix + [unpack] + suffix, fallback=last.partial_fallback)
return UnionType.make_union([simplified] + other_items)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/binder.py
|
Python
|
NOASSERTION
| 21,037 |
"""A Bogus[T] type alias for marking when we subvert the type system
We need this for compiling with mypyc, which inserts runtime
typechecks that cause problems when we subvert the type system. So
when compiling with mypyc, we turn those places into Any, while
keeping the types around for normal typechecks.
Since this causes the runtime types to be Any, this is best used
in places where efficient access to properties is not important.
For those cases some other technique should be used.
"""
from __future__ import annotations
from typing import Any, TypeVar
from mypy_extensions import FlexibleAlias
T = TypeVar("T")
# This won't ever be true at runtime, but we consider it true during
# mypyc compilations.
MYPYC = False
if MYPYC:
Bogus = FlexibleAlias[T, Any]
else:
Bogus = FlexibleAlias[T, T]
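# Editorial usage sketch (not part of the original module): a field annotated as
# `name: Bogus[str]` is checked as `str` during normal type checking, but when
# compiled with mypyc (MYPYC true) the alias flattens to Any, so mypyc inserts no
# runtime type check for assignments to it.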
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/bogus_type.py
|
Python
|
NOASSERTION
| 816 |
"""Facilities to analyze entire programs, including imported modules.
Parse and analyze the source files of a program in the correct order
(based on file dependencies), and collect the results.
This module only directs a build, which is performed in multiple passes per
file. The individual passes are implemented in separate modules.
The function build() is the main interface to this module.
"""
# TODO: More consistent terminology, e.g. path/fnam, module/id, state/file
from __future__ import annotations
import collections
import contextlib
import errno
import gc
import json
import os
import platform
import re
import stat
import sys
import time
import types
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
Final,
Iterator,
Mapping,
NamedTuple,
NoReturn,
Sequence,
TextIO,
)
from typing_extensions import TypeAlias as _TypeAlias, TypedDict
import mypy.semanal_main
from mypy.checker import TypeChecker
from mypy.error_formatter import OUTPUT_CHOICES, ErrorFormatter
from mypy.errors import CompileError, ErrorInfo, Errors, report_internal_error
from mypy.graph_utils import prepare_sccs, strongly_connected_components, topsort
from mypy.indirection import TypeIndirectionVisitor
from mypy.messages import MessageBuilder
from mypy.nodes import Import, ImportAll, ImportBase, ImportFrom, MypyFile, SymbolTable, TypeInfo
from mypy.partially_defined import PossiblyUndefinedVariableVisitor
from mypy.semanal import SemanticAnalyzer
from mypy.semanal_pass1 import SemanticAnalyzerPreAnalysis
from mypy.util import (
DecodeError,
decode_python_encoding,
get_mypy_comments,
hash_digest,
is_stub_package_file,
is_sub_path_normabs,
is_typeshed_file,
module_prefix,
read_py_file,
time_ref,
time_spent_us,
)
if TYPE_CHECKING:
from mypy.report import Reports # Avoid unconditional slow import
from mypy import errorcodes as codes
from mypy.config_parser import parse_mypy_comments
from mypy.fixup import fixup_module
from mypy.freetree import free_tree
from mypy.fscache import FileSystemCache
from mypy.metastore import FilesystemMetadataStore, MetadataStore, SqliteMetadataStore
from mypy.modulefinder import (
BuildSource as BuildSource,
BuildSourceSet as BuildSourceSet,
FindModuleCache,
ModuleNotFoundReason,
ModuleSearchResult,
SearchPaths,
compute_search_paths,
)
from mypy.nodes import Expression
from mypy.options import Options
from mypy.parse import parse
from mypy.plugin import ChainedPlugin, Plugin, ReportConfigContext
from mypy.plugins.default import DefaultPlugin
from mypy.renaming import LimitedVariableRenameVisitor, VariableRenameVisitor
from mypy.stats import dump_type_stats
from mypy.stubinfo import is_module_from_legacy_bundled_package, stub_distribution_name
from mypy.types import Type
from mypy.typestate import reset_global_state, type_state
from mypy.util import json_dumps, json_loads
from mypy.version import __version__
# Switch to True to produce debug output related to fine-grained incremental
# mode only that is useful during development. This produces only a subset of
# output compared to --verbose output. We use a global flag to enable this so
# that it's easy to enable this when running tests.
DEBUG_FINE_GRAINED: Final = False
# These modules are special and should always come from typeshed.
CORE_BUILTIN_MODULES: Final = {
"builtins",
"typing",
"types",
"typing_extensions",
"mypy_extensions",
"_typeshed",
"_collections_abc",
"collections",
"collections.abc",
"sys",
"abc",
}
Graph: _TypeAlias = Dict[str, "State"]
# TODO: Get rid of BuildResult. We might as well return a BuildManager.
class BuildResult:
"""The result of a successful build.
Attributes:
manager: The build manager.
files: Dictionary from module name to related AST node.
types: Dictionary from parse tree node to its inferred type.
used_cache: Whether the build took advantage of a pre-existing cache
errors: List of error messages.
"""
def __init__(self, manager: BuildManager, graph: Graph) -> None:
self.manager = manager
self.graph = graph
self.files = manager.modules
self.types = manager.all_types # Non-empty if export_types True in options
self.used_cache = manager.cache_enabled
self.errors: list[str] = [] # Filled in by build if desired
def build(
sources: list[BuildSource],
options: Options,
alt_lib_path: str | None = None,
flush_errors: Callable[[str | None, list[str], bool], None] | None = None,
fscache: FileSystemCache | None = None,
stdout: TextIO | None = None,
stderr: TextIO | None = None,
extra_plugins: Sequence[Plugin] | None = None,
) -> BuildResult:
"""Analyze a program.
A single call to build performs parsing, semantic analysis and optionally
type checking for the program *and* all imported modules, recursively.
Return BuildResult if successful or only non-blocking errors were found;
otherwise raise CompileError.
If a flush_errors callback is provided, all error messages will be
passed to it and the errors and messages fields of BuildResult and
CompileError (respectively) will be empty. Otherwise those fields will
report any error messages.
Args:
sources: list of sources to build
options: build options
alt_lib_path: an additional directory for looking up library modules
(takes precedence over other directories)
flush_errors: optional function to flush errors after a file is processed
fscache: optionally a file-system cacher
"""
# If we were not given a flush_errors, we use one that will populate those
# fields for callers that want the traditional API.
messages = []
def default_flush_errors(
filename: str | None, new_messages: list[str], is_serious: bool
) -> None:
messages.extend(new_messages)
flush_errors = flush_errors or default_flush_errors
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
extra_plugins = extra_plugins or []
try:
result = _build(
sources, options, alt_lib_path, flush_errors, fscache, stdout, stderr, extra_plugins
)
result.errors = messages
return result
except CompileError as e:
# CompileErrors raised from an errors object carry all of the
# messages that have not been reported out by error streaming.
        # Patch it up to contain either none or all of the messages,
# depending on whether we are flushing errors.
serious = not e.use_stdout
flush_errors(None, e.messages, serious)
e.messages = messages
raise
def _build(
sources: list[BuildSource],
options: Options,
alt_lib_path: str | None,
flush_errors: Callable[[str | None, list[str], bool], None],
fscache: FileSystemCache | None,
stdout: TextIO,
stderr: TextIO,
extra_plugins: Sequence[Plugin],
) -> BuildResult:
if platform.python_implementation() == "CPython":
# This seems the most reasonable place to tune garbage collection.
gc.set_threshold(150 * 1000)
data_dir = default_data_dir()
fscache = fscache or FileSystemCache()
search_paths = compute_search_paths(sources, options, data_dir, alt_lib_path)
reports = None
if options.report_dirs:
# Import lazily to avoid slowing down startup.
from mypy.report import Reports
reports = Reports(data_dir, options.report_dirs)
source_set = BuildSourceSet(sources)
cached_read = fscache.read
errors = Errors(options, read_source=lambda path: read_py_file(path, cached_read))
plugin, snapshot = load_plugins(options, errors, stdout, extra_plugins)
# Add catch-all .gitignore to cache dir if we created it
cache_dir_existed = os.path.isdir(options.cache_dir)
# Construct a build manager object to hold state during the build.
#
# Ignore current directory prefix in error messages.
manager = BuildManager(
data_dir,
search_paths,
ignore_prefix=os.getcwd(),
source_set=source_set,
reports=reports,
options=options,
version_id=__version__,
plugin=plugin,
plugins_snapshot=snapshot,
errors=errors,
error_formatter=None if options.output is None else OUTPUT_CHOICES.get(options.output),
flush_errors=flush_errors,
fscache=fscache,
stdout=stdout,
stderr=stderr,
)
if manager.verbosity() >= 2:
manager.trace(repr(options))
reset_global_state()
try:
graph = dispatch(sources, manager, stdout)
if not options.fine_grained_incremental:
type_state.reset_all_subtype_caches()
if options.timing_stats is not None:
dump_timing_stats(options.timing_stats, graph)
if options.line_checking_stats is not None:
dump_line_checking_stats(options.line_checking_stats, graph)
return BuildResult(manager, graph)
finally:
t0 = time.time()
manager.metastore.commit()
manager.add_stats(cache_commit_time=time.time() - t0)
manager.log(
"Build finished in %.3f seconds with %d modules, and %d errors"
% (
time.time() - manager.start_time,
len(manager.modules),
manager.errors.num_messages(),
)
)
manager.dump_stats()
if reports is not None:
# Finish the HTML or XML reports even if CompileError was raised.
reports.finish()
if not cache_dir_existed and os.path.isdir(options.cache_dir):
add_catch_all_gitignore(options.cache_dir)
exclude_from_backups(options.cache_dir)
if os.path.isdir(options.cache_dir):
record_missing_stub_packages(options.cache_dir, manager.missing_stub_packages)
def default_data_dir() -> str:
"""Returns directory containing typeshed directory."""
return os.path.dirname(__file__)
def normpath(path: str, options: Options) -> str:
"""Convert path to absolute; but to relative in bazel mode.
(Bazel's distributed cache doesn't like filesystem metadata to
end up in output files.)
"""
# TODO: Could we always use relpath? (A worry in non-bazel
# mode would be that a moved file may change its full module
# name without changing its size, mtime or hash.)
if options.bazel:
return os.path.relpath(path)
else:
return os.path.abspath(path)
class CacheMeta(NamedTuple):
id: str
path: str
mtime: int
size: int
hash: str
dependencies: list[str] # names of imported modules
data_mtime: int # mtime of data_json
data_json: str # path of <id>.data.json
suppressed: list[str] # dependencies that weren't imported
options: dict[str, object] | None # build options
# dep_prios and dep_lines are in parallel with dependencies + suppressed
dep_prios: list[int]
dep_lines: list[int]
interface_hash: str # hash representing the public interface
version_id: str # mypy version for cache invalidation
ignore_all: bool # if errors were ignored
plugin_data: Any # config data from plugins
# NOTE: dependencies + suppressed == all reachable imports;
# suppressed contains those reachable imports that were prevented by
# silent mode or simply not found.
# Metadata for the fine-grained dependencies file associated with a module.
class FgDepMeta(TypedDict):
path: str
mtime: int
def cache_meta_from_dict(meta: dict[str, Any], data_json: str) -> CacheMeta:
"""Build a CacheMeta object from a json metadata dictionary
Args:
meta: JSON metadata read from the metadata cache file
data_json: Path to the .data.json file containing the AST trees
"""
sentinel: Any = None # Values to be validated by the caller
return CacheMeta(
meta.get("id", sentinel),
meta.get("path", sentinel),
int(meta["mtime"]) if "mtime" in meta else sentinel,
meta.get("size", sentinel),
meta.get("hash", sentinel),
meta.get("dependencies", []),
int(meta["data_mtime"]) if "data_mtime" in meta else sentinel,
data_json,
meta.get("suppressed", []),
meta.get("options"),
meta.get("dep_prios", []),
meta.get("dep_lines", []),
meta.get("interface_hash", ""),
meta.get("version_id", sentinel),
meta.get("ignore_all", True),
meta.get("plugin_data", None),
)
# Priorities used for imports. (Here, top-level includes inside a class.)
# These are used to determine a more predictable order in which the
# nodes in an import cycle are processed.
PRI_HIGH: Final = 5 # top-level "from X import blah"
PRI_MED: Final = 10 # top-level "import X"
PRI_LOW: Final = 20 # either form inside a function
PRI_MYPY: Final = 25 # inside "if MYPY" or "if TYPE_CHECKING"
PRI_INDIRECT: Final = 30 # an indirect dependency
PRI_ALL: Final = 99 # include all priorities
def import_priority(imp: ImportBase, toplevel_priority: int) -> int:
"""Compute import priority from an import node."""
if not imp.is_top_level:
# Inside a function
return PRI_LOW
if imp.is_mypy_only:
# Inside "if MYPY" or "if typing.TYPE_CHECKING"
return max(PRI_MYPY, toplevel_priority)
# A regular import; priority determined by argument.
return toplevel_priority
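# Editorial example (illustrative only): for a top-level "from X import Y" the caller
# passes PRI_HIGH, so import_priority returns 5; the same import nested under
# "if TYPE_CHECKING" returns PRI_MYPY (25), and any import inside a function body
# returns PRI_LOW (20) regardless of the argument.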
def load_plugins_from_config(
options: Options, errors: Errors, stdout: TextIO
) -> tuple[list[Plugin], dict[str, str]]:
"""Load all configured plugins.
Return a list of all the loaded plugins from the config file.
The second return value is a snapshot of versions/hashes of loaded user
plugins (for cache validation).
"""
import importlib
snapshot: dict[str, str] = {}
if not options.config_file:
return [], snapshot
line = find_config_file_line_number(options.config_file, "mypy", "plugins")
if line == -1:
line = 1 # We need to pick some line number that doesn't look too confusing
def plugin_error(message: str) -> NoReturn:
errors.report(line, 0, message)
errors.raise_error(use_stdout=False)
custom_plugins: list[Plugin] = []
errors.set_file(options.config_file, None, options)
for plugin_path in options.plugins:
func_name = "plugin"
plugin_dir: str | None = None
if ":" in os.path.basename(plugin_path):
plugin_path, func_name = plugin_path.rsplit(":", 1)
if plugin_path.endswith(".py"):
# Plugin paths can be relative to the config file location.
plugin_path = os.path.join(os.path.dirname(options.config_file), plugin_path)
if not os.path.isfile(plugin_path):
plugin_error(f'Can\'t find plugin "{plugin_path}"')
# Use an absolute path to avoid populating the cache entry
# for 'tmp' during tests, since it will be different in
# different tests.
plugin_dir = os.path.abspath(os.path.dirname(plugin_path))
fnam = os.path.basename(plugin_path)
module_name = fnam[:-3]
sys.path.insert(0, plugin_dir)
elif re.search(r"[\\/]", plugin_path):
fnam = os.path.basename(plugin_path)
plugin_error(f'Plugin "{fnam}" does not have a .py extension')
else:
module_name = plugin_path
try:
module = importlib.import_module(module_name)
except Exception as exc:
plugin_error(f'Error importing plugin "{plugin_path}": {exc}')
finally:
if plugin_dir is not None:
assert sys.path[0] == plugin_dir
del sys.path[0]
if not hasattr(module, func_name):
plugin_error(
'Plugin "{}" does not define entry point function "{}"'.format(
plugin_path, func_name
)
)
try:
plugin_type = getattr(module, func_name)(__version__)
except Exception:
print(f"Error calling the plugin(version) entry point of {plugin_path}\n", file=stdout)
raise # Propagate to display traceback
if not isinstance(plugin_type, type):
plugin_error(
'Type object expected as the return value of "plugin"; got {!r} (in {})'.format(
plugin_type, plugin_path
)
)
if not issubclass(plugin_type, Plugin):
plugin_error(
'Return value of "plugin" must be a subclass of "mypy.plugin.Plugin" '
"(in {})".format(plugin_path)
)
try:
custom_plugins.append(plugin_type(options))
snapshot[module_name] = take_module_snapshot(module)
except Exception:
print(f"Error constructing plugin instance of {plugin_type.__name__}\n", file=stdout)
raise # Propagate to display traceback
return custom_plugins, snapshot
def load_plugins(
options: Options, errors: Errors, stdout: TextIO, extra_plugins: Sequence[Plugin]
) -> tuple[Plugin, dict[str, str]]:
"""Load all configured plugins.
Return a plugin that encapsulates all plugins chained together. Always
at least include the default plugin (it's last in the chain).
The second return value is a snapshot of versions/hashes of loaded user
plugins (for cache validation).
"""
custom_plugins, snapshot = load_plugins_from_config(options, errors, stdout)
custom_plugins += extra_plugins
default_plugin: Plugin = DefaultPlugin(options)
if not custom_plugins:
return default_plugin, snapshot
# Custom plugins take precedence over the default plugin.
return ChainedPlugin(options, custom_plugins + [default_plugin]), snapshot
def take_module_snapshot(module: types.ModuleType) -> str:
"""Take plugin module snapshot by recording its version and hash.
We record _both_ hash and the version to detect more possible changes
(e.g. if there is a change in modules imported by a plugin).
"""
if hasattr(module, "__file__"):
assert module.__file__ is not None
with open(module.__file__, "rb") as f:
digest = hash_digest(f.read())
else:
digest = "unknown"
ver = getattr(module, "__version__", "none")
return f"{ver}:{digest}"
def find_config_file_line_number(path: str, section: str, setting_name: str) -> int:
"""Return the approximate location of setting_name within mypy config file.
Return -1 if can't determine the line unambiguously.
"""
in_desired_section = False
try:
results = []
with open(path, encoding="UTF-8") as f:
for i, line in enumerate(f):
line = line.strip()
if line.startswith("[") and line.endswith("]"):
current_section = line[1:-1].strip()
in_desired_section = current_section == section
elif in_desired_section and re.match(rf"{setting_name}\s*=", line):
results.append(i + 1)
if len(results) == 1:
return results[0]
except OSError:
pass
return -1
class BuildManager:
"""This class holds shared state for building a mypy program.
It is used to coordinate parsing, import processing, semantic
analysis and type checking. The actual build steps are carried
out by dispatch().
Attributes:
data_dir: Mypy data directory (contains stubs)
search_paths: SearchPaths instance indicating where to look for modules
modules: Mapping of module ID to MypyFile (shared by the passes)
semantic_analyzer:
Semantic analyzer, pass 2
all_types: Map {Expression: Type} from all modules (enabled by export_types)
options: Build options
      missing_modules: Set of modules that could not be imported, encountered so far
stale_modules: Set of modules that needed to be rechecked (only used by tests)
fg_deps_meta: Metadata for fine-grained dependencies caches associated with modules
fg_deps: A fine-grained dependency map
version_id: The current mypy version (based on commit id when possible)
plugin: Active mypy plugin(s)
plugins_snapshot:
Snapshot of currently active user plugins (versions and hashes)
old_plugins_snapshot:
Plugins snapshot from previous incremental run (or None in
non-incremental mode and if cache was not found)
errors: Used for reporting all errors
flush_errors: A function for processing errors after each SCC
cache_enabled: Whether cache is being read. This is set based on options,
but is disabled if fine-grained cache loading fails
and after an initial fine-grained load. This doesn't
determine whether we write cache files or not.
quickstart_state:
A cache of filename -> mtime/size/hash info used to avoid
needing to hash source files when using a cache with mismatching mtimes
      stats: Dict with various instrumentation numbers; it is used
        not only for debugging, but is also required for correctness,
in particular to check consistency of the fine-grained dependency cache.
fscache: A file system cacher
ast_cache: AST cache to speed up mypy daemon
"""
def __init__(
self,
data_dir: str,
search_paths: SearchPaths,
ignore_prefix: str,
source_set: BuildSourceSet,
reports: Reports | None,
options: Options,
version_id: str,
plugin: Plugin,
plugins_snapshot: dict[str, str],
errors: Errors,
flush_errors: Callable[[str | None, list[str], bool], None],
fscache: FileSystemCache,
stdout: TextIO,
stderr: TextIO,
error_formatter: ErrorFormatter | None = None,
) -> None:
self.stats: dict[str, Any] = {} # Values are ints or floats
self.stdout = stdout
self.stderr = stderr
self.start_time = time.time()
self.data_dir = data_dir
self.errors = errors
self.errors.set_ignore_prefix(ignore_prefix)
self.error_formatter = error_formatter
self.search_paths = search_paths
self.source_set = source_set
self.reports = reports
self.options = options
self.version_id = version_id
self.modules: dict[str, MypyFile] = {}
self.missing_modules: set[str] = set()
self.fg_deps_meta: dict[str, FgDepMeta] = {}
# fg_deps holds the dependencies of every module that has been
# processed. We store this in BuildManager so that we can compute
# dependencies as we go, which allows us to free ASTs and type information,
# saving a ton of memory on net.
self.fg_deps: dict[str, set[str]] = {}
# Always convert the plugin to a ChainedPlugin so that it can be manipulated if needed
if not isinstance(plugin, ChainedPlugin):
plugin = ChainedPlugin(options, [plugin])
self.plugin = plugin
# Set of namespaces (module or class) that are being populated during semantic
# analysis and may have missing definitions.
self.incomplete_namespaces: set[str] = set()
self.semantic_analyzer = SemanticAnalyzer(
self.modules,
self.missing_modules,
self.incomplete_namespaces,
self.errors,
self.plugin,
)
self.all_types: dict[Expression, Type] = {} # Enabled by export_types
self.indirection_detector = TypeIndirectionVisitor()
self.stale_modules: set[str] = set()
self.rechecked_modules: set[str] = set()
self.flush_errors = flush_errors
has_reporters = reports is not None and reports.reporters
self.cache_enabled = (
options.incremental
and (not options.fine_grained_incremental or options.use_fine_grained_cache)
and not has_reporters
)
self.fscache = fscache
self.find_module_cache = FindModuleCache(
self.search_paths, self.fscache, self.options, source_set=self.source_set
)
for module in CORE_BUILTIN_MODULES:
if options.use_builtins_fixtures:
continue
path = self.find_module_cache.find_module(module, fast_path=True)
if not isinstance(path, str):
raise CompileError(
[f"Failed to find builtin module {module}, perhaps typeshed is broken?"]
)
if is_typeshed_file(options.abs_custom_typeshed_dir, path) or is_stub_package_file(
path
):
continue
raise CompileError(
[
f'mypy: "{os.path.relpath(path)}" shadows library module "{module}"',
f'note: A user-defined top-level module with name "{module}" is not supported',
]
)
self.metastore = create_metastore(options)
# a mapping from source files to their corresponding shadow files
# for efficient lookup
self.shadow_map: dict[str, str] = {}
if self.options.shadow_file is not None:
self.shadow_map = dict(self.options.shadow_file)
# a mapping from each file being typechecked to its possible shadow file
self.shadow_equivalence_map: dict[str, str | None] = {}
self.plugin = plugin
self.plugins_snapshot = plugins_snapshot
self.old_plugins_snapshot = read_plugins_snapshot(self)
self.quickstart_state = read_quickstart_file(options, self.stdout)
# Fine grained targets (module top levels and top level functions) processed by
# the semantic analyzer, used only for testing. Currently used only by the new
# semantic analyzer. Tuple of module and target name.
self.processed_targets: list[tuple[str, str]] = []
# Missing stub packages encountered.
self.missing_stub_packages: set[str] = set()
# Cache for mypy ASTs that have completed semantic analysis
# pass 1. When multiple files are added to the build in a
# single daemon increment, only one of the files gets added
# per step and the others are discarded. This gets repeated
# until all the files have been added. This means that a
# new file can be processed O(n**2) times. This cache
# avoids most of this redundant work.
self.ast_cache: dict[str, tuple[MypyFile, list[ErrorInfo]]] = {}
def dump_stats(self) -> None:
if self.options.dump_build_stats:
print("Stats:")
for key, value in sorted(self.stats_summary().items()):
print(f"{key + ':':24}{value}")
def use_fine_grained_cache(self) -> bool:
return self.cache_enabled and self.options.use_fine_grained_cache
def maybe_swap_for_shadow_path(self, path: str) -> str:
if not self.shadow_map:
return path
path = normpath(path, self.options)
previously_checked = path in self.shadow_equivalence_map
if not previously_checked:
for source, shadow in self.shadow_map.items():
if self.fscache.samefile(path, source):
self.shadow_equivalence_map[path] = shadow
break
else:
self.shadow_equivalence_map[path] = None
shadow_file = self.shadow_equivalence_map.get(path)
return shadow_file if shadow_file else path
def get_stat(self, path: str) -> os.stat_result | None:
return self.fscache.stat_or_none(self.maybe_swap_for_shadow_path(path))
def getmtime(self, path: str) -> int:
"""Return a file's mtime; but 0 in bazel mode.
(Bazel's distributed cache doesn't like filesystem metadata to
end up in output files.)
"""
if self.options.bazel:
return 0
else:
return int(self.metastore.getmtime(path))
def all_imported_modules_in_file(self, file: MypyFile) -> list[tuple[int, str, int]]:
"""Find all reachable import statements in a file.
Return list of tuples (priority, module id, import line number)
for all modules imported in file; lower numbers == higher priority.
Can generate blocking errors on bogus relative imports.
"""
def correct_rel_imp(imp: ImportFrom | ImportAll) -> str:
"""Function to correct for relative imports."""
file_id = file.fullname
rel = imp.relative
if rel == 0:
return imp.id
if os.path.basename(file.path).startswith("__init__."):
rel -= 1
if rel != 0:
file_id = ".".join(file_id.split(".")[:-rel])
new_id = file_id + "." + imp.id if imp.id else file_id
if not new_id:
self.errors.set_file(file.path, file.name, self.options)
self.errors.report(
imp.line, 0, "No parent module -- cannot perform relative import", blocker=True
)
return new_id
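        # A worked example (illustrative only, not part of mypy): in a module with
        # fullname "pkg.sub.mod", the statement `from ..util import x` has rel == 2 and
        # imp.id == "util"; the code above strips two trailing components, giving
        # file_id == "pkg" and new_id == "pkg.util".  In pkg/sub/__init__.py (fullname
        # "pkg.sub"), `from .util import x` has rel == 1, which the __init__ check
        # reduces to 0, so new_id == "pkg.sub.util".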
res: list[tuple[int, str, int]] = []
for imp in file.imports:
if not imp.is_unreachable:
if isinstance(imp, Import):
pri = import_priority(imp, PRI_MED)
ancestor_pri = import_priority(imp, PRI_LOW)
for id, _ in imp.ids:
res.append((pri, id, imp.line))
ancestor_parts = id.split(".")[:-1]
ancestors = []
for part in ancestor_parts:
ancestors.append(part)
res.append((ancestor_pri, ".".join(ancestors), imp.line))
elif isinstance(imp, ImportFrom):
cur_id = correct_rel_imp(imp)
all_are_submodules = True
# Also add any imported names that are submodules.
pri = import_priority(imp, PRI_MED)
for name, __ in imp.names:
sub_id = cur_id + "." + name
if self.is_module(sub_id):
res.append((pri, sub_id, imp.line))
else:
all_are_submodules = False
# Add cur_id as a dependency, even if all of the
# imports are submodules. Processing import from will try
# to look through cur_id, so we should depend on it.
                    # As a workaround for some bugs in cycle handling (#4498),
# if all of the imports are submodules, do the import at a lower
# priority.
pri = import_priority(imp, PRI_HIGH if not all_are_submodules else PRI_LOW)
res.append((pri, cur_id, imp.line))
elif isinstance(imp, ImportAll):
pri = import_priority(imp, PRI_HIGH)
res.append((pri, correct_rel_imp(imp), imp.line))
# Sort such that module (e.g. foo.bar.baz) comes before its ancestors (e.g. foo
# and foo.bar) so that, if FindModuleCache finds the target module in a
# package marked with py.typed underneath a namespace package installed in
# site-packages, (gasp), that cache's knowledge of the ancestors
# (aka FindModuleCache.ns_ancestors) can be primed when it is asked to find
# the parent.
res.sort(key=lambda x: -x[1].count("."))
return res
def is_module(self, id: str) -> bool:
"""Is there a file in the file system corresponding to module id?"""
return find_module_simple(id, self) is not None
def parse_file(
self, id: str, path: str, source: str, ignore_errors: bool, options: Options
) -> MypyFile:
"""Parse the source of a file with the given name.
Raise CompileError if there is a parse error.
"""
t0 = time.time()
if ignore_errors:
self.errors.ignored_files.add(path)
tree = parse(source, path, id, self.errors, options=options)
tree._fullname = id
self.add_stats(
files_parsed=1,
modules_parsed=int(not tree.is_stub),
stubs_parsed=int(tree.is_stub),
parse_time=time.time() - t0,
)
if self.errors.is_blockers():
self.log("Bailing due to parse errors")
self.errors.raise_error()
self.errors.set_file_ignored_lines(path, tree.ignored_lines, ignore_errors)
return tree
def load_fine_grained_deps(self, id: str) -> dict[str, set[str]]:
t0 = time.time()
if id in self.fg_deps_meta:
# TODO: Assert deps file wasn't changed.
deps = json_loads(self.metastore.read(self.fg_deps_meta[id]["path"]))
else:
deps = {}
val = {k: set(v) for k, v in deps.items()}
self.add_stats(load_fg_deps_time=time.time() - t0)
return val
def report_file(
self, file: MypyFile, type_map: dict[Expression, Type], options: Options
) -> None:
if self.reports is not None and self.source_set.is_source(file):
self.reports.file(file, self.modules, type_map, options)
def verbosity(self) -> int:
return self.options.verbosity
def log(self, *message: str) -> None:
if self.verbosity() >= 1:
if message:
print("LOG: ", *message, file=self.stderr)
else:
print(file=self.stderr)
self.stderr.flush()
def log_fine_grained(self, *message: str) -> None:
import mypy.build
if self.verbosity() >= 1:
self.log("fine-grained:", *message)
elif mypy.build.DEBUG_FINE_GRAINED:
# Output log in a simplified format that is quick to browse.
if message:
print(*message, file=self.stderr)
else:
print(file=self.stderr)
self.stderr.flush()
def trace(self, *message: str) -> None:
if self.verbosity() >= 2:
print("TRACE:", *message, file=self.stderr)
self.stderr.flush()
def add_stats(self, **kwds: Any) -> None:
for key, value in kwds.items():
if key in self.stats:
self.stats[key] += value
else:
self.stats[key] = value
def stats_summary(self) -> Mapping[str, object]:
return self.stats
def deps_to_json(x: dict[str, set[str]]) -> bytes:
return json_dumps({k: list(v) for k, v in x.items()})
# File for storing metadata about all the fine-grained dependency caches
DEPS_META_FILE: Final = "@deps.meta.json"
# File for storing fine-grained dependencies that didn't have a parent module in the build
DEPS_ROOT_FILE: Final = "@root.deps.json"
# The name of the fake module used to store fine-grained dependencies that
# have no other place to go.
FAKE_ROOT_MODULE: Final = "@root"
def write_deps_cache(
rdeps: dict[str, dict[str, set[str]]], manager: BuildManager, graph: Graph
) -> None:
"""Write cache files for fine-grained dependencies.
Serialize fine-grained dependencies map for fine grained mode.
    Dependencies on some module 'm' are stored in the dependency cache
file m.deps.json. This entails some spooky action at a distance:
if module 'n' depends on 'm', that produces entries in m.deps.json.
When there is a dependency on a module that does not exist in the
build, it is stored with its first existing parent module. If no
such module exists, it is stored with the fake module FAKE_ROOT_MODULE.
    This means that the validity of the fine-grained dependency caches
    is a global property, so we store validity-checking information for
fine-grained dependencies in a global cache file:
* We take a snapshot of current sources to later check consistency
between the fine-grained dependency cache and module cache metadata
* We store the mtime of all of the dependency files to verify they
haven't changed
"""
metastore = manager.metastore
error = False
fg_deps_meta = manager.fg_deps_meta.copy()
for id in rdeps:
if id != FAKE_ROOT_MODULE:
_, _, deps_json = get_cache_names(id, graph[id].xpath, manager.options)
else:
deps_json = DEPS_ROOT_FILE
assert deps_json
manager.log("Writing deps cache", deps_json)
if not manager.metastore.write(deps_json, deps_to_json(rdeps[id])):
manager.log(f"Error writing fine-grained deps JSON file {deps_json}")
error = True
else:
fg_deps_meta[id] = {"path": deps_json, "mtime": manager.getmtime(deps_json)}
meta_snapshot: dict[str, str] = {}
for id, st in graph.items():
# If we didn't parse a file (so it doesn't have a
# source_hash), then it must be a module with a fresh cache,
# so use the hash from that.
if st.source_hash:
hash = st.source_hash
else:
assert st.meta, "Module must be either parsed or cached"
hash = st.meta.hash
meta_snapshot[id] = hash
meta = {"snapshot": meta_snapshot, "deps_meta": fg_deps_meta}
if not metastore.write(DEPS_META_FILE, json_dumps(meta)):
manager.log(f"Error writing fine-grained deps meta JSON file {DEPS_META_FILE}")
error = True
if error:
manager.errors.set_file(_cache_dir_prefix(manager.options), None, manager.options)
manager.errors.report(0, 0, "Error writing fine-grained dependencies cache", blocker=True)
def invert_deps(deps: dict[str, set[str]], graph: Graph) -> dict[str, dict[str, set[str]]]:
"""Splits fine-grained dependencies based on the module of the trigger.
Returns a dictionary from module ids to all dependencies on that
module. Dependencies not associated with a module in the build will be
associated with the nearest parent module that is in the build, or the
fake module FAKE_ROOT_MODULE if none are.
"""
# Lazy import to speed up startup
from mypy.server.target import trigger_to_target
# Prepopulate the map for all the modules that have been processed,
# so that we always generate files for processed modules (even if
# there aren't any dependencies to them.)
rdeps: dict[str, dict[str, set[str]]] = {id: {} for id, st in graph.items() if st.tree}
for trigger, targets in deps.items():
module = module_prefix(graph, trigger_to_target(trigger))
if not module or not graph[module].tree:
module = FAKE_ROOT_MODULE
mod_rdeps = rdeps.setdefault(module, {})
mod_rdeps.setdefault(trigger, set()).update(targets)
return rdeps
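# A small illustrative sketch (not part of mypy) of what invert_deps() computes: each
# trigger is attributed to the module that defines it (or to FAKE_ROOT_MODULE when
# that module is not part of the build), so its dependents land in that module's deps
# file.  The trigger/target strings and the prefix parsing are assumptions made for
# illustration only.
def _sketch_invert_deps() -> dict[str, dict[str, set[str]]]:
    deps = {"<a.f>": {"b.g"}, "<missing.h>": {"b.g"}}
    modules_in_build = {"a", "b"}
    rdeps: dict[str, dict[str, set[str]]] = {m: {} for m in modules_in_build}
    for trigger, targets in deps.items():
        module = trigger[1:-1].rsplit(".", 1)[0]  # crude stand-in for module_prefix()
        if module not in modules_in_build:
            module = "@root"  # FAKE_ROOT_MODULE
        rdeps.setdefault(module, {}).setdefault(trigger, set()).update(targets)
    return rdeps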
def generate_deps_for_cache(manager: BuildManager, graph: Graph) -> dict[str, dict[str, set[str]]]:
"""Generate fine-grained dependencies into a form suitable for serializing.
This does a couple things:
1. Splits fine-grained deps based on the module of the trigger
2. For each module we generated fine-grained deps for, load any previous
deps and merge them in.
Returns a dictionary from module ids to all dependencies on that
module. Dependencies not associated with a module in the build will be
associated with the nearest parent module that is in the build, or the
fake module FAKE_ROOT_MODULE if none are.
"""
from mypy.server.deps import merge_dependencies # Lazy import to speed up startup
    # Split the dependencies out based on the module that is depended on.
rdeps = invert_deps(manager.fg_deps, graph)
# We can't just clobber existing dependency information, so we
# load the deps for every module we've generated new dependencies
# to and merge the new deps into them.
for module, mdeps in rdeps.items():
old_deps = manager.load_fine_grained_deps(module)
merge_dependencies(old_deps, mdeps)
return rdeps
PLUGIN_SNAPSHOT_FILE: Final = "@plugins_snapshot.json"
def write_plugins_snapshot(manager: BuildManager) -> None:
"""Write snapshot of versions and hashes of currently active plugins."""
snapshot = json_dumps(manager.plugins_snapshot)
if not manager.metastore.write(PLUGIN_SNAPSHOT_FILE, snapshot):
manager.errors.set_file(_cache_dir_prefix(manager.options), None, manager.options)
manager.errors.report(0, 0, "Error writing plugins snapshot", blocker=True)
def read_plugins_snapshot(manager: BuildManager) -> dict[str, str] | None:
"""Read cached snapshot of versions and hashes of plugins from previous run."""
snapshot = _load_json_file(
PLUGIN_SNAPSHOT_FILE,
manager,
log_success="Plugins snapshot ",
log_error="Could not load plugins snapshot: ",
)
if snapshot is None:
return None
if not isinstance(snapshot, dict):
manager.log(f"Could not load plugins snapshot: cache is not a dict: {type(snapshot)}")
return None
return snapshot
def read_quickstart_file(
options: Options, stdout: TextIO
) -> dict[str, tuple[float, int, str]] | None:
quickstart: dict[str, tuple[float, int, str]] | None = None
if options.quickstart_file:
# This is very "best effort". If the file is missing or malformed,
# just ignore it.
raw_quickstart: dict[str, Any] = {}
try:
with open(options.quickstart_file, "rb") as f:
raw_quickstart = json_loads(f.read())
quickstart = {}
for file, (x, y, z) in raw_quickstart.items():
quickstart[file] = (x, y, z)
except Exception as e:
print(f"Warning: Failed to load quickstart file: {str(e)}\n", file=stdout)
return quickstart
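# A sketch (not part of mypy) of the quickstart file format consumed above: a JSON
# object mapping source paths to [mtime, size, hash] triples, which lets
# validate_meta() below trust a cached hash without re-hashing files whose recorded
# mtime and size still match.  The path and hash values here are placeholders.
def _sketch_quickstart_file() -> dict[str, tuple[float, int, str]]:
    raw = {"src/pkg/mod.py": (1700000000.0, 2048, "hash-of-mod-source")}
    return {path: (mtime, size, digest) for path, (mtime, size, digest) in raw.items()}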
def read_deps_cache(manager: BuildManager, graph: Graph) -> dict[str, FgDepMeta] | None:
"""Read and validate the fine-grained dependencies cache.
See the write_deps_cache documentation for more information on
the details of the cache.
Returns None if the cache was invalid in some way.
"""
deps_meta = _load_json_file(
DEPS_META_FILE,
manager,
log_success="Deps meta ",
log_error="Could not load fine-grained dependency metadata: ",
)
if deps_meta is None:
return None
meta_snapshot = deps_meta["snapshot"]
# Take a snapshot of the source hashes from all of the metas we found.
# (Including the ones we rejected because they were out of date.)
# We use this to verify that they match up with the proto_deps.
current_meta_snapshot = {
id: st.meta_source_hash for id, st in graph.items() if st.meta_source_hash is not None
}
common = set(meta_snapshot.keys()) & set(current_meta_snapshot.keys())
if any(meta_snapshot[id] != current_meta_snapshot[id] for id in common):
# TODO: invalidate also if options changed (like --strict-optional)?
manager.log("Fine-grained dependencies cache inconsistent, ignoring")
return None
module_deps_metas = deps_meta["deps_meta"]
assert isinstance(module_deps_metas, dict)
if not manager.options.skip_cache_mtime_checks:
for meta in module_deps_metas.values():
try:
matched = manager.getmtime(meta["path"]) == meta["mtime"]
except FileNotFoundError:
matched = False
if not matched:
manager.log(f"Invalid or missing fine-grained deps cache: {meta['path']}")
return None
return module_deps_metas
def _load_json_file(
file: str, manager: BuildManager, log_success: str, log_error: str
) -> dict[str, Any] | None:
"""A simple helper to read a JSON file with logging."""
t0 = time.time()
try:
data = manager.metastore.read(file)
except OSError:
manager.log(log_error + file)
return None
manager.add_stats(metastore_read_time=time.time() - t0)
# Only bother to compute the log message if we are logging it, since it could be big
if manager.verbosity() >= 2:
manager.trace(log_success + data.rstrip().decode())
try:
t1 = time.time()
result = json_loads(data)
manager.add_stats(data_json_load_time=time.time() - t1)
except json.JSONDecodeError:
manager.errors.set_file(file, None, manager.options)
manager.errors.report(
-1,
-1,
"Error reading JSON file;"
" you likely have a bad cache.\n"
"Try removing the {cache_dir} directory"
" and run mypy again.".format(cache_dir=manager.options.cache_dir),
blocker=True,
)
return None
else:
assert isinstance(result, dict)
return result
def _cache_dir_prefix(options: Options) -> str:
"""Get current cache directory (or file if id is given)."""
if options.bazel:
# This is needed so the cache map works.
return os.curdir
cache_dir = options.cache_dir
pyversion = options.python_version
base = os.path.join(cache_dir, "%d.%d" % pyversion)
return base
def add_catch_all_gitignore(target_dir: str) -> None:
"""Add catch-all .gitignore to an existing directory.
No-op if the .gitignore already exists.
"""
gitignore = os.path.join(target_dir, ".gitignore")
try:
with open(gitignore, "x") as f:
print("# Automatically created by mypy", file=f)
print("*", file=f)
except FileExistsError:
pass
def exclude_from_backups(target_dir: str) -> None:
"""Exclude the directory from various archives and backups supporting CACHEDIR.TAG.
If the CACHEDIR.TAG file exists the function is a no-op.
"""
cachedir_tag = os.path.join(target_dir, "CACHEDIR.TAG")
try:
with open(cachedir_tag, "x") as f:
f.write(
"""Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag automatically created by mypy.
# For information about cache directory tags see https://bford.info/cachedir/
"""
)
except FileExistsError:
pass
def create_metastore(options: Options) -> MetadataStore:
"""Create the appropriate metadata store."""
if options.sqlite_cache:
mds: MetadataStore = SqliteMetadataStore(_cache_dir_prefix(options))
else:
mds = FilesystemMetadataStore(_cache_dir_prefix(options))
return mds
def get_cache_names(id: str, path: str, options: Options) -> tuple[str, str, str | None]:
"""Return the file names for the cache files.
Args:
id: module ID
path: module path
      options: build options (determine the cache directory and Python version)
Returns:
A tuple with the file names to be used for the meta JSON, the
data JSON, and the fine-grained deps JSON, respectively.
"""
if options.cache_map:
pair = options.cache_map.get(normpath(path, options))
else:
pair = None
if pair is not None:
# The cache map paths were specified relative to the base directory,
        # but the filesystem metastore APIs operate relative to the cache
# prefix directory.
# Solve this by rewriting the paths as relative to the root dir.
# This only makes sense when using the filesystem backed cache.
root = _cache_dir_prefix(options)
return (os.path.relpath(pair[0], root), os.path.relpath(pair[1], root), None)
prefix = os.path.join(*id.split("."))
is_package = os.path.basename(path).startswith("__init__.py")
if is_package:
prefix = os.path.join(prefix, "__init__")
deps_json = None
if options.cache_fine_grained:
deps_json = prefix + ".deps.json"
return (prefix + ".meta.json", prefix + ".data.json", deps_json)
def find_cache_meta(id: str, path: str, manager: BuildManager) -> CacheMeta | None:
"""Find cache data for a module.
Args:
id: module ID
path: module path
manager: the build manager (for pyversion, log/trace, and build options)
Returns:
A CacheMeta instance if the cache data was found and appears
valid; otherwise None.
"""
# TODO: May need to take more build options into account
meta_json, data_json, _ = get_cache_names(id, path, manager.options)
manager.trace(f"Looking for {id} at {meta_json}")
t0 = time.time()
meta = _load_json_file(
meta_json, manager, log_success=f"Meta {id} ", log_error=f"Could not load cache for {id}: "
)
t1 = time.time()
if meta is None:
return None
if not isinstance(meta, dict):
manager.log(f"Could not load cache for {id}: meta cache is not a dict: {repr(meta)}")
return None
m = cache_meta_from_dict(meta, data_json)
t2 = time.time()
manager.add_stats(
load_meta_time=t2 - t0, load_meta_load_time=t1 - t0, load_meta_from_dict_time=t2 - t1
)
# Don't check for path match, that is dealt with in validate_meta().
#
# TODO: these `type: ignore`s wouldn't be necessary
# if the type annotations for CacheMeta were more accurate
# (all of these attributes can be `None`)
if (
m.id != id
or m.mtime is None # type: ignore[redundant-expr]
or m.size is None # type: ignore[redundant-expr]
or m.dependencies is None # type: ignore[redundant-expr]
or m.data_mtime is None
):
manager.log(f"Metadata abandoned for {id}: attributes are missing")
return None
# Ignore cache if generated by an older mypy version.
if (
(m.version_id != manager.version_id and not manager.options.skip_version_check)
or m.options is None
or len(m.dependencies) + len(m.suppressed) != len(m.dep_prios)
or len(m.dependencies) + len(m.suppressed) != len(m.dep_lines)
):
manager.log(f"Metadata abandoned for {id}: new attributes are missing")
return None
# Ignore cache if (relevant) options aren't the same.
# Note that it's fine to mutilate cached_options since it's only used here.
cached_options = m.options
current_options = manager.options.clone_for_module(id).select_options_affecting_cache()
if manager.options.skip_version_check:
# When we're lax about version we're also lax about platform.
cached_options["platform"] = current_options["platform"]
if "debug_cache" in cached_options:
# Older versions included debug_cache, but it's silly to compare it.
del cached_options["debug_cache"]
if cached_options != current_options:
manager.log(f"Metadata abandoned for {id}: options differ")
if manager.options.verbosity >= 2:
for key in sorted(set(cached_options) | set(current_options)):
if cached_options.get(key) != current_options.get(key):
manager.trace(
" {}: {} != {}".format(
key, cached_options.get(key), current_options.get(key)
)
)
return None
if manager.old_plugins_snapshot and manager.plugins_snapshot:
# Check if plugins are still the same.
if manager.plugins_snapshot != manager.old_plugins_snapshot:
manager.log(f"Metadata abandoned for {id}: plugins differ")
return None
# So that plugins can return data with tuples in it without
# things silently always invalidating modules, we round-trip
# the config data. This isn't beautiful.
plugin_data = json_loads(
json_dumps(manager.plugin.report_config_data(ReportConfigContext(id, path, is_check=True)))
)
if m.plugin_data != plugin_data:
manager.log(f"Metadata abandoned for {id}: plugin configuration differs")
return None
manager.add_stats(fresh_metas=1)
return m
def validate_meta(
meta: CacheMeta | None, id: str, path: str | None, ignore_all: bool, manager: BuildManager
) -> CacheMeta | None:
"""Checks whether the cached AST of this module can be used.
Returns:
None, if the cached AST is unusable.
Original meta, if mtime/size matched.
Meta with mtime updated to match source file, if hash/size matched but mtime/path didn't.
"""
# This requires two steps. The first one is obvious: we check that the module source file
# contents is the same as it was when the cache data file was created. The second one is not
# too obvious: we check that the cache data file mtime has not changed; it is needed because
# we use cache data file mtime to propagate information about changes in the dependencies.
if meta is None:
manager.log(f"Metadata not found for {id}")
return None
if meta.ignore_all and not ignore_all:
manager.log(f"Metadata abandoned for {id}: errors were previously ignored")
return None
t0 = time.time()
bazel = manager.options.bazel
assert path is not None, "Internal error: meta was provided without a path"
if not manager.options.skip_cache_mtime_checks:
# Check data_json; assume if its mtime matches it's good.
try:
data_mtime = manager.getmtime(meta.data_json)
except OSError:
manager.log(f"Metadata abandoned for {id}: failed to stat data_json")
return None
if data_mtime != meta.data_mtime:
manager.log(f"Metadata abandoned for {id}: data cache is modified")
return None
if bazel:
# Normalize path under bazel to make sure it isn't absolute
path = normpath(path, manager.options)
st = manager.get_stat(path)
if st is None:
return None
if not stat.S_ISDIR(st.st_mode) and not stat.S_ISREG(st.st_mode):
manager.log(f"Metadata abandoned for {id}: file or directory {path} does not exist")
return None
manager.add_stats(validate_stat_time=time.time() - t0)
# When we are using a fine-grained cache, we want our initial
# build() to load all of the cache information and then do a
# fine-grained incremental update to catch anything that has
# changed since the cache was generated. We *don't* want to do a
# coarse-grained incremental rebuild, so we accept the cache
# metadata even if it doesn't match the source file.
#
# We still *do* the mtime/hash checks, however, to enable
# fine-grained mode to take advantage of the mtime-updating
# optimization when mtimes differ but hashes match. There is
# essentially no extra time cost to computing the hash here, since
# it will be cached and will be needed for finding changed files
# later anyways.
fine_grained_cache = manager.use_fine_grained_cache()
size = st.st_size
# Bazel ensures the cache is valid.
if size != meta.size and not bazel and not fine_grained_cache:
manager.log(f"Metadata abandoned for {id}: file {path} has different size")
return None
# Bazel ensures the cache is valid.
mtime = 0 if bazel else int(st.st_mtime)
if not bazel and (mtime != meta.mtime or path != meta.path):
if manager.quickstart_state and path in manager.quickstart_state:
            # If the mtime and the size of the file recorded in the quickstart dump match
# what we see on disk, we know (assume) that the hash matches the quickstart
# data as well. If that hash matches the hash in the metadata, then we know
# the file is up to date even though the mtime is wrong, without needing to hash it.
qmtime, qsize, qhash = manager.quickstart_state[path]
if int(qmtime) == mtime and qsize == size and qhash == meta.hash:
manager.log(f"Metadata fresh (by quickstart) for {id}: file {path}")
meta = meta._replace(mtime=mtime, path=path)
return meta
t0 = time.time()
try:
# dir means it is a namespace package
if stat.S_ISDIR(st.st_mode):
source_hash = ""
else:
source_hash = manager.fscache.hash_digest(path)
except (OSError, UnicodeDecodeError, DecodeError):
return None
manager.add_stats(validate_hash_time=time.time() - t0)
if source_hash != meta.hash:
if fine_grained_cache:
manager.log(f"Using stale metadata for {id}: file {path}")
return meta
else:
manager.log(f"Metadata abandoned for {id}: file {path} has different hash")
return None
else:
t0 = time.time()
# Optimization: update mtime and path (otherwise, this mismatch will reappear).
meta = meta._replace(mtime=mtime, path=path)
# Construct a dict we can pass to json.dumps() (compare to write_cache()).
meta_dict = {
"id": id,
"path": path,
"mtime": mtime,
"size": size,
"hash": source_hash,
"data_mtime": meta.data_mtime,
"dependencies": meta.dependencies,
"suppressed": meta.suppressed,
"options": (manager.options.clone_for_module(id).select_options_affecting_cache()),
"dep_prios": meta.dep_prios,
"dep_lines": meta.dep_lines,
"interface_hash": meta.interface_hash,
"version_id": manager.version_id,
"ignore_all": meta.ignore_all,
"plugin_data": meta.plugin_data,
}
meta_bytes = json_dumps(meta_dict, manager.options.debug_cache)
meta_json, _, _ = get_cache_names(id, path, manager.options)
manager.log(
"Updating mtime for {}: file {}, meta {}, mtime {}".format(
id, path, meta_json, meta.mtime
)
)
t1 = time.time()
manager.metastore.write(meta_json, meta_bytes) # Ignore errors, just an optimization.
manager.add_stats(validate_update_time=time.time() - t1, validate_munging_time=t1 - t0)
return meta
# It's a match on (id, path, size, hash, mtime).
manager.log(f"Metadata fresh for {id}: file {path}")
return meta
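# A condensed sketch (not part of mypy) of the freshness decision made by
# validate_meta() above, ignoring the bazel, fine-grained-cache and quickstart special
# cases: a size mismatch is always stale; a matching mtime (and path) is fresh; and
# otherwise the source is hashed, with a hash match counting as fresh once the cached
# mtime is rewritten.
def _sketch_meta_freshness(
    meta_size: int, meta_mtime: int, meta_hash: str, size: int, mtime: int, digest: str
) -> str:
    if size != meta_size:
        return "stale"
    if mtime == meta_mtime:
        return "fresh"
    return "fresh (update cached mtime)" if digest == meta_hash else "stale"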
def compute_hash(text: str) -> str:
# We use a crypto hash instead of the builtin hash(...) function
# because the output of hash(...) can differ between runs due to
# hash randomization (enabled by default in Python 3.3). See the
# note in
# https://docs.python.org/3/reference/datamodel.html#object.__hash__.
return hash_digest(text.encode("utf-8"))
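# A tiny sketch (not part of mypy) of why a cryptographic digest is used above:
# hashlib digests of the same text are identical across interpreter runs, whereas the
# builtin hash() of a str varies between runs because of hash randomization.
def _sketch_stable_digest(text: str) -> str:
    import hashlib

    return hashlib.sha256(text.encode("utf-8")).hexdigest()  # stable across runs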
def write_cache(
id: str,
path: str,
tree: MypyFile,
dependencies: list[str],
suppressed: list[str],
dep_prios: list[int],
dep_lines: list[int],
old_interface_hash: str,
source_hash: str,
ignore_all: bool,
manager: BuildManager,
) -> tuple[str, CacheMeta | None]:
"""Write cache files for a module.
    Note that mypy's behavior is still correct when any given
write_cache() call is replaced with a no-op, so error handling
code that bails without writing anything is okay.
Args:
id: module ID
path: module path
tree: the fully checked module data
dependencies: module IDs on which this module depends
suppressed: module IDs which were suppressed as dependencies
dep_prios: priorities (parallel array to dependencies)
dep_lines: import line locations (parallel array to dependencies)
old_interface_hash: the hash from the previous version of the data cache file
source_hash: the hash of the source code
ignore_all: the ignore_all flag for this module
manager: the build manager (for pyversion, log/trace)
Returns:
A tuple containing the interface hash and CacheMeta
corresponding to the metadata that was written (the latter may
be None if the cache could not be written).
"""
metastore = manager.metastore
# For Bazel we use relative paths and zero mtimes.
bazel = manager.options.bazel
# Obtain file paths.
meta_json, data_json, _ = get_cache_names(id, path, manager.options)
manager.log(f"Writing {id} {path} {meta_json} {data_json}")
# Update tree.path so that in bazel mode it's made relative (since
# sometimes paths leak out).
if bazel:
tree.path = path
# Serialize data and analyze interface
data = tree.serialize()
data_bytes = json_dumps(data, manager.options.debug_cache)
interface_hash = hash_digest(data_bytes)
plugin_data = manager.plugin.report_config_data(ReportConfigContext(id, path, is_check=False))
# Obtain and set up metadata
st = manager.get_stat(path)
if st is None:
manager.log(f"Cannot get stat for {path}")
# Remove apparently-invalid cache files.
# (This is purely an optimization.)
for filename in [data_json, meta_json]:
try:
os.remove(filename)
except OSError:
pass
# Still return the interface hash we computed.
return interface_hash, None
# Write data cache file, if applicable
# Note that for Bazel we don't record the data file's mtime.
if old_interface_hash == interface_hash:
manager.trace(f"Interface for {id} is unchanged")
else:
manager.trace(f"Interface for {id} has changed")
if not metastore.write(data_json, data_bytes):
# Most likely the error is the replace() call
# (see https://github.com/python/mypy/issues/3215).
manager.log(f"Error writing data JSON file {data_json}")
# Let's continue without writing the meta file. Analysis:
# If the replace failed, we've changed nothing except left
# behind an extraneous temporary file; if the replace
# worked but the getmtime() call failed, the meta file
# will be considered invalid on the next run because the
# data_mtime field won't match the data file's mtime.
# Both have the effect of slowing down the next run a
# little bit due to an out-of-date cache file.
return interface_hash, None
try:
data_mtime = manager.getmtime(data_json)
except OSError:
manager.log(f"Error in os.stat({data_json!r}), skipping cache write")
return interface_hash, None
mtime = 0 if bazel else int(st.st_mtime)
size = st.st_size
# Note that the options we store in the cache are the options as
# specified by the command line/config file and *don't* reflect
# updates made by inline config directives in the file. This is
# important, or otherwise the options would never match when
# verifying the cache.
options = manager.options.clone_for_module(id)
assert source_hash is not None
meta = {
"id": id,
"path": path,
"mtime": mtime,
"size": size,
"hash": source_hash,
"data_mtime": data_mtime,
"dependencies": dependencies,
"suppressed": suppressed,
"options": options.select_options_affecting_cache(),
"dep_prios": dep_prios,
"dep_lines": dep_lines,
"interface_hash": interface_hash,
"version_id": manager.version_id,
"ignore_all": ignore_all,
"plugin_data": plugin_data,
}
# Write meta cache file
meta_str = json_dumps(meta, manager.options.debug_cache)
if not metastore.write(meta_json, meta_str):
# Most likely the error is the replace() call
# (see https://github.com/python/mypy/issues/3215).
# The next run will simply find the cache entry out of date.
manager.log(f"Error writing meta JSON file {meta_json}")
return interface_hash, cache_meta_from_dict(meta, data_json)
def delete_cache(id: str, path: str, manager: BuildManager) -> None:
"""Delete cache files for a module.
The cache files for a module are deleted when mypy finds errors there.
This avoids inconsistent states with cache files from different mypy runs,
see #4043 for an example.
"""
# We don't delete .deps files on errors, since the dependencies
# are mostly generated from other files and the metadata is
# tracked separately.
meta_path, data_path, _ = get_cache_names(id, path, manager.options)
cache_paths = [meta_path, data_path]
manager.log(f"Deleting {id} {path} {' '.join(x for x in cache_paths if x)}")
for filename in cache_paths:
try:
manager.metastore.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
manager.log(f"Error deleting cache file {filename}: {e.strerror}")
"""Dependency manager.
Design
======
Ideally
-------
A. Collapse cycles (each SCC -- strongly connected component --
becomes one "supernode").
B. Topologically sort nodes based on dependencies.
C. Process from leaves towards roots.
Wrinkles
--------
a. Need to parse source modules to determine dependencies.
b. Processing order for modules within an SCC.
c. Must order mtimes of files to decide whether to re-process; depends
on clock never resetting.
d. from P import M; checks filesystem whether module P.M exists in
filesystem.
e. Race conditions, where somebody modifies a file while we're
processing. Solved by using a FileSystemCache.
Steps
-----
1. For each explicitly given module find the source file location.
2. For each such module load and check the cache metadata, and decide
whether it's valid.
3. Now recursively (or iteratively) find dependencies and add those to
the graph:
- for cached nodes use the list of dependencies from the cache
metadata (this will be valid even if we later end up re-parsing
the same source);
- for uncached nodes parse the file and process all imports found,
taking care of (a) above.
Step 3 should also address (d) above.
Once step 3 terminates we have the entire dependency graph, and for
each module we've either loaded the cache metadata or parsed the
source code. (However, we may still need to parse those modules for
which we have cache metadata but that depend, directly or indirectly,
on at least one module for which the cache metadata is stale.)
Now we can execute steps A-C from the first section. Finding SCCs for
step A shouldn't be hard; there's a recipe here:
https://code.activestate.com/recipes/578507/. There's also a plethora
of topsort recipes, e.g. https://code.activestate.com/recipes/577413/.
For single nodes, processing is simple. If the node was cached, we
deserialize the cache data and fix up cross-references. Otherwise, we
do semantic analysis followed by type checking. We also handle (c)
above; if a module has valid cache data *but* any of its
dependencies was processed from source, then the module should be
processed from source.
A relatively simple optimization (outside SCCs) we might do in the
future is as follows: if a node's cache data is valid, but one or more
of its dependencies are out of date so we have to re-parse the node
from source, once we have fully type-checked the node, we can decide
whether its symbol table actually changed compared to the cache data
(by reading the cache data and comparing it to the data we would be
writing). If there is no change we can declare the node up to date,
and any node that depends (and for which we have cached data, and
whose other dependencies are up to date) on it won't need to be
re-parsed from source.
Import cycles
-------------
Finally we have to decide how to handle (b), import cycles.  Here
we'll need a modified version of the original state machine
(build.py), but we only need to do this per SCC, and we won't have to
deal with changes to the list of nodes while we're processing it.
If all nodes in the SCC have valid cache metadata and all dependencies
outside the SCC are still valid, we can proceed as follows:
1. Load cache data for all nodes in the SCC.
2. Fix up cross-references for all nodes in the SCC.
Otherwise, the simplest (but potentially slow) way to proceed is to
invalidate all cache data in the SCC and re-parse all nodes in the SCC
from source. We can do this as follows:
1. Parse source for all nodes in the SCC.
2. Semantic analysis for all nodes in the SCC.
3. Type check all nodes in the SCC.
(If there are more passes the process is the same -- each pass should
be done for all nodes before starting the next pass for any nodes in
the SCC.)
We could process the nodes in the SCC in any order. For sentimental
reasons, I've decided to process them in the reverse order in which we
encountered them when originally constructing the graph. That's how
the old build.py deals with cycles, and at least this reproduces the
previous implementation more accurately.
Can we do better than re-parsing all nodes in the SCC when any of its
dependencies are out of date? It's doubtful. The optimization
mentioned at the end of the previous section would require re-parsing
and type-checking a node and then comparing its symbol table to the
cached data; but because the node is part of a cycle we can't
technically type-check it until the semantic analysis of all other
nodes in the cycle has completed. (This is an important issue because
Dropbox has a very large cycle in production code. But I'd like to
deal with it later.)
Additional wrinkles
-------------------
During implementation more wrinkles were found.
- When a submodule of a package (e.g. x.y) is encountered, the parent
package (e.g. x) must also be loaded, but it is not strictly a
dependency. See State.add_ancestors() below.
"""
class ModuleNotFound(Exception):
"""Control flow exception to signal that a module was not found."""
class State:
"""The state for a module.
The source is only used for the -c command line option; in that
case path is None. Otherwise source is None and path isn't.
"""
manager: BuildManager
order_counter: ClassVar[int] = 0
order: int # Order in which modules were encountered
id: str # Fully qualified module name
path: str | None = None # Path to module source
abspath: str | None = None # Absolute path to module source
xpath: str # Path or '<string>'
source: str | None = None # Module source code
source_hash: str | None = None # Hash calculated based on the source code
meta_source_hash: str | None = None # Hash of the source given in the meta, if any
meta: CacheMeta | None = None
data: str | None = None
tree: MypyFile | None = None
    # We keep both a list and a set of dependencies: the set makes it efficient to
    # prevent duplicates, and the list preserves the order of iteration over
    # dependencies.
# They should be managed with add_dependency and suppress_dependency.
dependencies: list[str] # Modules directly imported by the module
dependencies_set: set[str] # The same but as a set for deduplication purposes
suppressed: list[str] # Suppressed/missing dependencies
suppressed_set: set[str] # Suppressed/missing dependencies
priorities: dict[str, int]
# Map each dependency to the line number where it is first imported
dep_line_map: dict[str, int]
# Parent package, its parent, etc.
ancestors: list[str] | None = None
# List of (path, line number) tuples giving context for import
import_context: list[tuple[str, int]]
# The State from which this module was imported, if any
caller_state: State | None = None
# If caller_state is set, the line number in the caller where the import occurred
caller_line = 0
# If True, indicate that the public interface of this module is unchanged
externally_same = True
# Contains a hash of the public interface in incremental mode
interface_hash: str = ""
# Options, specialized for this file
options: Options
# Whether to ignore all errors
ignore_all = False
# Whether the module has an error or any of its dependencies have one.
transitive_error = False
# Errors reported before semantic analysis, to allow fine-grained
# mode to keep reporting them.
early_errors: list[ErrorInfo]
# Type checker used for checking this file. Use type_checker() for
# access and to construct this on demand.
_type_checker: TypeChecker | None = None
fine_grained_deps_loaded = False
# Cumulative time spent on this file, in microseconds (for profiling stats)
time_spent_us: int = 0
# Per-line type-checking time (cumulative time spent type-checking expressions
# on a given source code line).
per_line_checking_time_ns: dict[int, int]
def __init__(
self,
id: str | None,
path: str | None,
source: str | None,
manager: BuildManager,
caller_state: State | None = None,
caller_line: int = 0,
ancestor_for: State | None = None,
root_source: bool = False,
# If `temporary` is True, this State is being created to just
# quickly parse/load the tree, without an intention to further
# process it. With this flag, any changes to external state as well
# as error reporting should be avoided.
temporary: bool = False,
) -> None:
if not temporary:
assert id or path or source is not None, "Neither id, path nor source given"
self.manager = manager
State.order_counter += 1
self.order = State.order_counter
self.caller_state = caller_state
self.caller_line = caller_line
if caller_state:
self.import_context = caller_state.import_context.copy()
self.import_context.append((caller_state.xpath, caller_line))
else:
self.import_context = []
self.id = id or "__main__"
self.options = manager.options.clone_for_module(self.id)
self.early_errors = []
self._type_checker = None
if not path and source is None:
assert id is not None
try:
path, follow_imports = find_module_and_diagnose(
manager,
id,
self.options,
caller_state,
caller_line,
ancestor_for,
root_source,
skip_diagnose=temporary,
)
except ModuleNotFound:
if not temporary:
manager.missing_modules.add(id)
raise
if follow_imports == "silent":
self.ignore_all = True
elif path and is_silent_import_module(manager, path) and not root_source:
self.ignore_all = True
self.path = path
if path:
self.abspath = os.path.abspath(path)
self.xpath = path or "<string>"
if path and source is None and self.manager.cache_enabled:
self.meta = find_cache_meta(self.id, path, manager)
# TODO: Get mtime if not cached.
if self.meta is not None:
self.interface_hash = self.meta.interface_hash
self.meta_source_hash = self.meta.hash
if path and source is None and self.manager.fscache.isdir(path):
source = ""
self.source = source
self.add_ancestors()
self.per_line_checking_time_ns = collections.defaultdict(int)
t0 = time.time()
self.meta = validate_meta(self.meta, self.id, self.path, self.ignore_all, manager)
self.manager.add_stats(validate_meta_time=time.time() - t0)
if self.meta:
# Make copies, since we may modify these and want to
# compare them to the originals later.
self.dependencies = list(self.meta.dependencies)
self.dependencies_set = set(self.dependencies)
self.suppressed = list(self.meta.suppressed)
self.suppressed_set = set(self.suppressed)
all_deps = self.dependencies + self.suppressed
assert len(all_deps) == len(self.meta.dep_prios)
self.priorities = {id: pri for id, pri in zip(all_deps, self.meta.dep_prios)}
assert len(all_deps) == len(self.meta.dep_lines)
self.dep_line_map = {id: line for id, line in zip(all_deps, self.meta.dep_lines)}
if temporary:
self.load_tree(temporary=True)
if not manager.use_fine_grained_cache():
# Special case: if there were a previously missing package imported here
# and it is not present, then we need to re-calculate dependencies.
# This is to support patterns like this:
# from missing_package import missing_module # type: ignore
# At first mypy doesn't know that `missing_module` is a module
# (it may be a variable, a class, or a function), so it is not added to
# suppressed dependencies. Therefore, when the package with module is added,
# we need to re-calculate dependencies.
# NOTE: see comment below for why we skip this in fine grained mode.
if exist_added_packages(self.suppressed, manager, self.options):
self.parse_file() # This is safe because the cache is anyway stale.
self.compute_dependencies()
else:
# When doing a fine-grained cache load, pretend we only
# know about modules that have cache information and defer
# handling new modules until the fine-grained update.
if manager.use_fine_grained_cache():
manager.log(f"Deferring module to fine-grained update {path} ({id})")
raise ModuleNotFound
# Parse the file (and then some) to get the dependencies.
self.parse_file(temporary=temporary)
self.compute_dependencies()
@property
def xmeta(self) -> CacheMeta:
assert self.meta, "missing meta on allegedly fresh module"
return self.meta
def add_ancestors(self) -> None:
if self.path is not None:
_, name = os.path.split(self.path)
base, _ = os.path.splitext(name)
if "." in base:
# This is just a weird filename, don't add anything
self.ancestors = []
return
# All parent packages are new ancestors.
ancestors = []
parent = self.id
while "." in parent:
parent, _ = parent.rsplit(".", 1)
ancestors.append(parent)
self.ancestors = ancestors
def is_fresh(self) -> bool:
"""Return whether the cache data for this file is fresh."""
# NOTE: self.dependencies may differ from
# self.meta.dependencies when a dependency is dropped due to
# suppression by silent mode. However when a suppressed
# dependency is added back we find out later in the process.
return (
self.meta is not None
and self.is_interface_fresh()
and self.dependencies == self.meta.dependencies
)
def is_interface_fresh(self) -> bool:
return self.externally_same
def mark_as_rechecked(self) -> None:
"""Marks this module as having been fully re-analyzed by the type-checker."""
self.manager.rechecked_modules.add(self.id)
def mark_interface_stale(self, *, on_errors: bool = False) -> None:
"""Marks this module as having a stale public interface, and discards the cache data."""
self.externally_same = False
if not on_errors:
self.manager.stale_modules.add(self.id)
def check_blockers(self) -> None:
"""Raise CompileError if a blocking error is detected."""
if self.manager.errors.is_blockers():
self.manager.log("Bailing due to blocking errors")
self.manager.errors.raise_error()
@contextlib.contextmanager
def wrap_context(self, check_blockers: bool = True) -> Iterator[None]:
"""Temporarily change the error import context to match this state.
Also report an internal error if an unexpected exception was raised
and raise an exception on a blocking error, unless
check_blockers is False. Skipping blocking error reporting is used
in the semantic analyzer so that we can report all blocking errors
for a file (across multiple targets) to maintain backward
compatibility.
"""
save_import_context = self.manager.errors.import_context()
self.manager.errors.set_import_context(self.import_context)
try:
yield
except CompileError:
raise
except Exception as err:
report_internal_error(
err,
self.path,
0,
self.manager.errors,
self.options,
self.manager.stdout,
self.manager.stderr,
)
self.manager.errors.set_import_context(save_import_context)
# TODO: Move this away once we've removed the old semantic analyzer?
if check_blockers:
self.check_blockers()
def load_fine_grained_deps(self) -> dict[str, set[str]]:
return self.manager.load_fine_grained_deps(self.id)
def load_tree(self, temporary: bool = False) -> None:
assert (
self.meta is not None
), "Internal error: this method must be called only for cached modules"
data = _load_json_file(
self.meta.data_json, self.manager, "Load tree ", "Could not load tree: "
)
if data is None:
return
t0 = time.time()
# TODO: Assert data file wasn't changed.
self.tree = MypyFile.deserialize(data)
t1 = time.time()
self.manager.add_stats(deserialize_time=t1 - t0)
if not temporary:
self.manager.modules[self.id] = self.tree
self.manager.add_stats(fresh_trees=1)
def fix_cross_refs(self) -> None:
assert self.tree is not None, "Internal error: method must be called on parsed file only"
# We need to set allow_missing when doing a fine grained cache
# load because we need to gracefully handle missing modules.
fixup_module(self.tree, self.manager.modules, self.options.use_fine_grained_cache)
# Methods for processing modules from source code.
def parse_file(self, *, temporary: bool = False) -> None:
"""Parse file and run first pass of semantic analysis.
Everything done here is local to the file. Don't depend on imported
modules in any way. Also record module dependencies based on imports.
"""
if self.tree is not None:
# The file was already parsed (in __init__()).
return
manager = self.manager
# Can we reuse a previously parsed AST? This avoids redundant work in daemon.
cached = self.id in manager.ast_cache
modules = manager.modules
if not cached:
manager.log(f"Parsing {self.xpath} ({self.id})")
else:
manager.log(f"Using cached AST for {self.xpath} ({self.id})")
t0 = time_ref()
with self.wrap_context():
source = self.source
self.source = None # We won't need it again.
if self.path and source is None:
try:
path = manager.maybe_swap_for_shadow_path(self.path)
source = decode_python_encoding(manager.fscache.read(path))
self.source_hash = manager.fscache.hash_digest(path)
except OSError as ioerr:
# ioerr.strerror differs for os.stat failures between Windows and
# other systems, but os.strerror(ioerr.errno) does not, so we use that.
# (We want the error messages to be platform-independent so that the
# tests have predictable output.)
raise CompileError(
[
"mypy: can't read file '{}': {}".format(
self.path, os.strerror(ioerr.errno)
)
],
module_with_blocker=self.id,
) from ioerr
except (UnicodeDecodeError, DecodeError) as decodeerr:
if self.path.endswith(".pyd"):
err = f"mypy: stubgen does not support .pyd files: '{self.path}'"
else:
err = f"mypy: can't decode file '{self.path}': {str(decodeerr)}"
raise CompileError([err], module_with_blocker=self.id) from decodeerr
elif self.path and self.manager.fscache.isdir(self.path):
source = ""
self.source_hash = ""
else:
assert source is not None
self.source_hash = compute_hash(source)
self.parse_inline_configuration(source)
if not cached:
self.tree = manager.parse_file(
self.id,
self.xpath,
source,
ignore_errors=self.ignore_all or self.options.ignore_errors,
options=self.options,
)
else:
# Reuse a cached AST
self.tree = manager.ast_cache[self.id][0]
manager.errors.set_file_ignored_lines(
self.xpath,
self.tree.ignored_lines,
self.ignore_all or self.options.ignore_errors,
)
self.time_spent_us += time_spent_us(t0)
if not cached:
# Make a copy of any errors produced during parse time so that
# fine-grained mode can repeat them when the module is
# reprocessed.
self.early_errors = list(manager.errors.error_info_map.get(self.xpath, []))
else:
self.early_errors = manager.ast_cache[self.id][1]
if not temporary:
modules[self.id] = self.tree
if not cached:
self.semantic_analysis_pass1()
if not temporary:
self.check_blockers()
manager.ast_cache[self.id] = (self.tree, self.early_errors)
def parse_inline_configuration(self, source: str) -> None:
"""Check for inline mypy: options directive and parse them."""
flags = get_mypy_comments(source)
if flags:
changes, config_errors = parse_mypy_comments(flags, self.options)
self.options = self.options.apply_changes(changes)
self.manager.errors.set_file(self.xpath, self.id, self.options)
for lineno, error in config_errors:
self.manager.errors.report(lineno, 0, error)
def semantic_analysis_pass1(self) -> None:
"""Perform pass 1 of semantic analysis, which happens immediately after parsing.
This pass can't assume that any other modules have been processed yet.
"""
options = self.options
assert self.tree is not None
t0 = time_ref()
# Do the first pass of semantic analysis: analyze the reachability
# of blocks and import statements. We must do this before
# processing imports, since this may mark some import statements as
# unreachable.
#
# TODO: This should not be considered as a semantic analysis
# pass -- it's an independent pass.
analyzer = SemanticAnalyzerPreAnalysis()
with self.wrap_context():
analyzer.visit_file(self.tree, self.xpath, self.id, options)
self.manager.errors.set_skipped_lines(self.xpath, self.tree.skipped_lines)
# TODO: Do this while constructing the AST?
self.tree.names = SymbolTable()
if not self.tree.is_stub:
# Always perform some low-key variable renaming
self.tree.accept(LimitedVariableRenameVisitor())
if options.allow_redefinition:
# Perform more renaming across the AST to allow variable redefinitions
self.tree.accept(VariableRenameVisitor())
self.time_spent_us += time_spent_us(t0)
def add_dependency(self, dep: str) -> None:
if dep not in self.dependencies_set:
self.dependencies.append(dep)
self.dependencies_set.add(dep)
if dep in self.suppressed_set:
self.suppressed.remove(dep)
self.suppressed_set.remove(dep)
def suppress_dependency(self, dep: str) -> None:
if dep in self.dependencies_set:
self.dependencies.remove(dep)
self.dependencies_set.remove(dep)
if dep not in self.suppressed_set:
self.suppressed.append(dep)
self.suppressed_set.add(dep)
def compute_dependencies(self) -> None:
"""Compute a module's dependencies after parsing it.
This is used when we parse a file that we didn't have
up-to-date cache information for. When we have an up-to-date
cache, we just use the cached info.
"""
manager = self.manager
assert self.tree is not None
# Compute (direct) dependencies.
# Add all direct imports (this is why we needed the first pass).
# Also keep track of each dependency's source line.
# Missing dependencies will be moved from dependencies to
# suppressed when they fail to be loaded in load_graph.
self.dependencies = []
self.dependencies_set = set()
self.suppressed = []
self.suppressed_set = set()
self.priorities = {} # id -> priority
self.dep_line_map = {} # id -> line
dep_entries = manager.all_imported_modules_in_file(
self.tree
) + self.manager.plugin.get_additional_deps(self.tree)
for pri, id, line in dep_entries:
self.priorities[id] = min(pri, self.priorities.get(id, PRI_ALL))
if id == self.id:
continue
self.add_dependency(id)
if id not in self.dep_line_map:
self.dep_line_map[id] = line
# Every module implicitly depends on builtins.
if self.id != "builtins":
self.add_dependency("builtins")
self.check_blockers() # Can fail due to bogus relative imports
def type_check_first_pass(self) -> None:
if self.options.semantic_analysis_only:
return
t0 = time_ref()
with self.wrap_context():
self.type_checker().check_first_pass()
self.time_spent_us += time_spent_us(t0)
def type_checker(self) -> TypeChecker:
if not self._type_checker:
assert self.tree is not None, "Internal error: must be called on parsed file only"
manager = self.manager
self._type_checker = TypeChecker(
manager.errors,
manager.modules,
self.options,
self.tree,
self.xpath,
manager.plugin,
self.per_line_checking_time_ns,
)
return self._type_checker
def type_map(self) -> dict[Expression, Type]:
# We can extract the master type map directly since at this
# point no temporary type maps can be active.
assert len(self.type_checker()._type_maps) == 1
return self.type_checker()._type_maps[0]
def type_check_second_pass(self) -> bool:
if self.options.semantic_analysis_only:
return False
t0 = time_ref()
with self.wrap_context():
result = self.type_checker().check_second_pass()
self.time_spent_us += time_spent_us(t0)
return result
def detect_possibly_undefined_vars(self) -> None:
assert self.tree is not None, "Internal error: method must be called on parsed file only"
if self.tree.is_stub:
# We skip stub files because they aren't actually executed.
return
manager = self.manager
manager.errors.set_file(self.xpath, self.tree.fullname, options=self.options)
if manager.errors.is_error_code_enabled(
codes.POSSIBLY_UNDEFINED
) or manager.errors.is_error_code_enabled(codes.USED_BEFORE_DEF):
self.tree.accept(
PossiblyUndefinedVariableVisitor(
MessageBuilder(manager.errors, manager.modules),
self.type_map(),
self.options,
self.tree.names,
)
)
def finish_passes(self) -> None:
assert self.tree is not None, "Internal error: method must be called on parsed file only"
manager = self.manager
if self.options.semantic_analysis_only:
return
t0 = time_ref()
with self.wrap_context():
# Some tests (and tools) want to look at the set of all types.
options = manager.options
if options.export_types:
manager.all_types.update(self.type_map())
# We should always patch indirect dependencies, even in full (non-incremental) builds,
# because the cache still may be written, and it must be correct.
# TODO: find a more robust way to traverse *all* relevant types?
expr_types = set(self.type_map().values())
symbol_types = set()
for _, sym, _ in self.tree.local_definitions():
if sym.type is not None:
symbol_types.add(sym.type)
if isinstance(sym.node, TypeInfo):
# TypeInfo symbols have some extra relevant types.
symbol_types.update(sym.node.bases)
if sym.node.metaclass_type:
symbol_types.add(sym.node.metaclass_type)
if sym.node.typeddict_type:
symbol_types.add(sym.node.typeddict_type)
if sym.node.tuple_type:
symbol_types.add(sym.node.tuple_type)
self._patch_indirect_dependencies(
self.type_checker().module_refs, expr_types | symbol_types
)
if self.options.dump_inference_stats:
dump_type_stats(
self.tree,
self.xpath,
modules=self.manager.modules,
inferred=True,
typemap=self.type_map(),
)
manager.report_file(self.tree, self.type_map(), self.options)
self.update_fine_grained_deps(self.manager.fg_deps)
if manager.options.export_ref_info:
write_undocumented_ref_info(
self, manager.metastore, manager.options, self.type_map()
)
self.free_state()
if not manager.options.fine_grained_incremental and not manager.options.preserve_asts:
free_tree(self.tree)
self.time_spent_us += time_spent_us(t0)
def free_state(self) -> None:
if self._type_checker:
self._type_checker.reset()
self._type_checker = None
def _patch_indirect_dependencies(self, module_refs: set[str], types: set[Type]) -> None:
assert None not in types
valid = self.valid_references()
encountered = self.manager.indirection_detector.find_modules(types) | module_refs
extra = encountered - valid
for dep in sorted(extra):
if dep not in self.manager.modules:
continue
if dep not in self.suppressed_set and dep not in self.manager.missing_modules:
self.add_dependency(dep)
self.priorities[dep] = PRI_INDIRECT
elif dep not in self.suppressed_set and dep in self.manager.missing_modules:
self.suppress_dependency(dep)
def compute_fine_grained_deps(self) -> dict[str, set[str]]:
assert self.tree is not None
if self.id in ("builtins", "typing", "types", "sys", "_typeshed"):
# We don't track changes to core parts of typeshed -- the
# assumption is that they are only changed as part of mypy
# updates, which will invalidate everything anyway. These
# will always be processed in the initial non-fine-grained
# build. Other modules may be brought in as a result of an
# fine-grained increment, and we may need these
# dependencies then to handle cyclic imports.
return {}
from mypy.server.deps import get_dependencies # Lazy import to speed up startup
return get_dependencies(
target=self.tree,
type_map=self.type_map(),
python_version=self.options.python_version,
options=self.manager.options,
)
def update_fine_grained_deps(self, deps: dict[str, set[str]]) -> None:
options = self.manager.options
if options.cache_fine_grained or options.fine_grained_incremental:
from mypy.server.deps import merge_dependencies # Lazy import to speed up startup
merge_dependencies(self.compute_fine_grained_deps(), deps)
type_state.update_protocol_deps(deps)
def valid_references(self) -> set[str]:
assert self.ancestors is not None
valid_refs = set(self.dependencies + self.suppressed + self.ancestors)
valid_refs.add(self.id)
if "os" in valid_refs:
valid_refs.add("os.path")
return valid_refs
def write_cache(self) -> None:
assert self.tree is not None, "Internal error: method must be called on parsed file only"
# We don't support writing cache files in fine-grained incremental mode.
if (
not self.path
or self.options.cache_dir == os.devnull
or self.options.fine_grained_incremental
):
if self.options.debug_serialize:
try:
self.tree.serialize()
except Exception:
print(f"Error serializing {self.id}", file=self.manager.stdout)
raise # Propagate to display traceback
return
is_errors = self.transitive_error
if is_errors:
delete_cache(self.id, self.path, self.manager)
self.meta = None
self.mark_interface_stale(on_errors=True)
return
dep_prios = self.dependency_priorities()
dep_lines = self.dependency_lines()
assert self.source_hash is not None
assert len(set(self.dependencies)) == len(
self.dependencies
), f"Duplicates in dependencies list for {self.id} ({self.dependencies})"
new_interface_hash, self.meta = write_cache(
self.id,
self.path,
self.tree,
list(self.dependencies),
list(self.suppressed),
dep_prios,
dep_lines,
self.interface_hash,
self.source_hash,
self.ignore_all,
self.manager,
)
if new_interface_hash == self.interface_hash:
self.manager.log(f"Cached module {self.id} has same interface")
else:
self.manager.log(f"Cached module {self.id} has changed interface")
self.mark_interface_stale()
self.interface_hash = new_interface_hash
def verify_dependencies(self, suppressed_only: bool = False) -> None:
"""Report errors for import targets in modules that don't exist.
If suppressed_only is set, only check suppressed dependencies.
"""
manager = self.manager
assert self.ancestors is not None
if suppressed_only:
all_deps = self.suppressed
else:
# Strip out indirect dependencies. See comment in build.load_graph().
dependencies = [
dep for dep in self.dependencies if self.priorities.get(dep) != PRI_INDIRECT
]
all_deps = dependencies + self.suppressed + self.ancestors
for dep in all_deps:
if dep in manager.modules:
continue
options = manager.options.clone_for_module(dep)
if options.ignore_missing_imports:
continue
line = self.dep_line_map.get(dep, 1)
try:
if dep in self.ancestors:
state: State | None = None
ancestor: State | None = self
else:
state, ancestor = self, None
# Called just for its side effects of producing diagnostics.
find_module_and_diagnose(
manager,
dep,
options,
caller_state=state,
caller_line=line,
ancestor_for=ancestor,
)
except (ModuleNotFound, CompileError):
                # Swallow up any ModuleNotFounds or CompileErrors while generating
                # a diagnostic. CompileErrors may get generated in
# fine-grained mode when an __init__.py is deleted, if a module
# that was in that package has targets reprocessed before
# it is renamed.
pass
def dependency_priorities(self) -> list[int]:
return [self.priorities.get(dep, PRI_HIGH) for dep in self.dependencies + self.suppressed]
def dependency_lines(self) -> list[int]:
return [self.dep_line_map.get(dep, 1) for dep in self.dependencies + self.suppressed]
def generate_unused_ignore_notes(self) -> None:
if (
self.options.warn_unused_ignores
or codes.UNUSED_IGNORE in self.options.enabled_error_codes
) and codes.UNUSED_IGNORE not in self.options.disabled_error_codes:
# If this file was initially loaded from the cache, it may have suppressed
# dependencies due to imports with ignores on them. We need to generate
# those errors to avoid spuriously flagging them as unused ignores.
if self.meta:
self.verify_dependencies(suppressed_only=True)
self.manager.errors.generate_unused_ignore_errors(self.xpath)
def generate_ignore_without_code_notes(self) -> None:
if self.manager.errors.is_error_code_enabled(codes.IGNORE_WITHOUT_CODE):
self.manager.errors.generate_ignore_without_code_errors(
self.xpath, self.options.warn_unused_ignores
)
# Module import and diagnostic glue
def find_module_and_diagnose(
manager: BuildManager,
id: str,
options: Options,
caller_state: State | None = None,
caller_line: int = 0,
ancestor_for: State | None = None,
root_source: bool = False,
skip_diagnose: bool = False,
) -> tuple[str, str]:
"""Find a module by name, respecting follow_imports and producing diagnostics.
If the module is not found, then the ModuleNotFound exception is raised.
Args:
id: module to find
options: the options for the module being loaded
caller_state: the state of the importing module, if applicable
caller_line: the line number of the import
ancestor_for: the child module this is an ancestor of, if applicable
root_source: whether this source was specified on the command line
skip_diagnose: skip any error diagnosis and reporting (but ModuleNotFound is
still raised if the module is missing)
The specified value of follow_imports for a module can be overridden
if the module is specified on the command line or if it is a stub,
so we compute and return the "effective" follow_imports of the module.
Returns a tuple containing (file path, target's effective follow_imports setting)
"""
result = find_module_with_reason(id, manager)
if isinstance(result, str):
# For non-stubs, look at options.follow_imports:
# - normal (default) -> fully analyze
# - silent -> analyze but silence errors
# - skip -> don't analyze, make the type Any
follow_imports = options.follow_imports
if (
root_source # Honor top-level modules
or (
result.endswith(".pyi") # Stubs are always normal
and not options.follow_imports_for_stubs # except when they aren't
)
or id in CORE_BUILTIN_MODULES # core is always normal
):
follow_imports = "normal"
if skip_diagnose:
pass
elif follow_imports == "silent":
# Still import it, but silence non-blocker errors.
manager.log(f"Silencing {result} ({id})")
elif follow_imports == "skip" or follow_imports == "error":
# In 'error' mode, produce special error messages.
if id not in manager.missing_modules:
manager.log(f"Skipping {result} ({id})")
if follow_imports == "error":
if ancestor_for:
skipping_ancestor(manager, id, result, ancestor_for)
else:
skipping_module(manager, caller_line, caller_state, id, result)
raise ModuleNotFound
if is_silent_import_module(manager, result) and not root_source:
follow_imports = "silent"
return (result, follow_imports)
else:
# Could not find a module. Typically the reason is a
# misspelled module name, missing stub, module not in
# search path or the module has not been installed.
ignore_missing_imports = options.ignore_missing_imports
# Don't honor a global (not per-module) ignore_missing_imports
# setting for modules that used to have bundled stubs, as
# otherwise updating mypy can silently result in new false
# negatives. (Unless there are stubs but they are incomplete.)
global_ignore_missing_imports = manager.options.ignore_missing_imports
if (
is_module_from_legacy_bundled_package(id)
and global_ignore_missing_imports
and not options.ignore_missing_imports_per_module
and result is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED
):
ignore_missing_imports = False
if skip_diagnose:
raise ModuleNotFound
if caller_state:
if not (ignore_missing_imports or in_partial_package(id, manager)):
module_not_found(manager, caller_line, caller_state, id, result)
raise ModuleNotFound
elif root_source:
# If we can't find a root source it's always fatal.
# TODO: This might hide non-fatal errors from
# root sources processed earlier.
raise CompileError([f"mypy: can't find module '{id}'"])
else:
raise ModuleNotFound
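# Illustrative sketch (not part of mypy): a simplified, side-effect-free view of
# how the "effective" follow_imports value is derived above. The helper name and
# parameters are hypothetical; the real function also handles core builtin
# modules, silent site-packages imports, and the follow-imports=error diagnostics.
def _example_effective_follow_imports(
    configured: str, *, is_root_source: bool, is_stub: bool, follow_stubs: bool
) -> str:
    if is_root_source or (is_stub and not follow_stubs):
        return "normal"  # command-line modules and stubs are always fully analyzed
    return configured
# _example_effective_follow_imports(
#     "skip", is_root_source=False, is_stub=True, follow_stubs=False
# ) == "normal"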
def exist_added_packages(suppressed: list[str], manager: BuildManager, options: Options) -> bool:
"""Find if there are any newly added packages that were previously suppressed.
Exclude everything not in build for follow-imports=skip.
"""
for dep in suppressed:
if dep in manager.source_set.source_modules:
# We don't need to add any special logic for this. If a module
# is added to build, importers will be invalidated by normal mechanism.
continue
path = find_module_simple(dep, manager)
if not path:
continue
if options.follow_imports == "skip" and (
not path.endswith(".pyi") or options.follow_imports_for_stubs
):
continue
if "__init__.py" in path:
            # It is better to use a slightly lenient test here: this will only
            # slightly reduce performance, while an overly strict test may affect
            # correctness.
return True
return False
def find_module_simple(id: str, manager: BuildManager) -> str | None:
"""Find a filesystem path for module `id` or `None` if not found."""
t0 = time.time()
x = manager.find_module_cache.find_module(id, fast_path=True)
manager.add_stats(find_module_time=time.time() - t0, find_module_calls=1)
if isinstance(x, ModuleNotFoundReason):
return None
return x
def find_module_with_reason(id: str, manager: BuildManager) -> ModuleSearchResult:
"""Find a filesystem path for module `id` or the reason it can't be found."""
t0 = time.time()
x = manager.find_module_cache.find_module(id, fast_path=False)
manager.add_stats(find_module_time=time.time() - t0, find_module_calls=1)
return x
def in_partial_package(id: str, manager: BuildManager) -> bool:
"""Check if a missing module can potentially be a part of a package.
This checks if there is any existing parent __init__.pyi stub that
defines a module-level __getattr__ (a.k.a. partial stub package).
"""
while "." in id:
parent, _ = id.rsplit(".", 1)
if parent in manager.modules:
parent_mod: MypyFile | None = manager.modules[parent]
else:
# Parent is not in build, try quickly if we can find it.
try:
parent_st = State(
id=parent, path=None, source=None, manager=manager, temporary=True
)
except (ModuleNotFound, CompileError):
parent_mod = None
else:
parent_mod = parent_st.tree
if parent_mod is not None:
# Bail out soon, complete subpackage found
return parent_mod.is_partial_stub_package
id = parent
return False
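# Illustrative sketch (not part of mypy): the loop above just peels off the last
# dotted component until it finds a parent that is already known. The standalone
# helper below mimics that traversal; the name is hypothetical.
def _example_parent_packages(module_id: str) -> list[str]:
    """Return the chain of parent package ids, innermost first."""
    parents = []
    while "." in module_id:
        module_id, _ = module_id.rsplit(".", 1)
        parents.append(module_id)
    return parents
# _example_parent_packages("a.b.c.d") == ["a.b.c", "a.b", "a"]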
def module_not_found(
manager: BuildManager,
line: int,
caller_state: State,
target: str,
reason: ModuleNotFoundReason,
) -> None:
errors = manager.errors
save_import_context = errors.import_context()
errors.set_import_context(caller_state.import_context)
errors.set_file(caller_state.xpath, caller_state.id, caller_state.options)
if target == "builtins":
errors.report(
line, 0, "Cannot find 'builtins' module. Typeshed appears broken!", blocker=True
)
errors.raise_error()
else:
daemon = manager.options.fine_grained_incremental
msg, notes = reason.error_message_templates(daemon)
if reason == ModuleNotFoundReason.NOT_FOUND:
code = codes.IMPORT_NOT_FOUND
elif (
reason == ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS
or reason == ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED
):
code = codes.IMPORT_UNTYPED
else:
code = codes.IMPORT
errors.report(line, 0, msg.format(module=target), code=code)
dist = stub_distribution_name(target)
for note in notes:
if "{stub_dist}" in note:
assert dist is not None
note = note.format(stub_dist=dist)
errors.report(line, 0, note, severity="note", only_once=True, code=code)
if reason is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED:
assert dist is not None
manager.missing_stub_packages.add(dist)
errors.set_import_context(save_import_context)
def skipping_module(
manager: BuildManager, line: int, caller_state: State | None, id: str, path: str
) -> None:
"""Produce an error for an import ignored due to --follow_imports=error"""
assert caller_state, (id, path)
save_import_context = manager.errors.import_context()
manager.errors.set_import_context(caller_state.import_context)
manager.errors.set_file(caller_state.xpath, caller_state.id, manager.options)
manager.errors.report(line, 0, f'Import of "{id}" ignored', severity="error")
manager.errors.report(
line,
0,
"(Using --follow-imports=error, module not passed on command line)",
severity="note",
only_once=True,
)
manager.errors.set_import_context(save_import_context)
def skipping_ancestor(manager: BuildManager, id: str, path: str, ancestor_for: State) -> None:
"""Produce an error for an ancestor ignored due to --follow_imports=error"""
# TODO: Read the path (the __init__.py file) and return
# immediately if it's empty or only contains comments.
# But beware, some package may be the ancestor of many modules,
# so we'd need to cache the decision.
manager.errors.set_import_context([])
manager.errors.set_file(ancestor_for.xpath, ancestor_for.id, manager.options)
manager.errors.report(
-1, -1, f'Ancestor package "{id}" ignored', severity="error", only_once=True
)
manager.errors.report(
-1,
-1,
"(Using --follow-imports=error, submodule passed on command line)",
severity="note",
only_once=True,
)
def log_configuration(manager: BuildManager, sources: list[BuildSource]) -> None:
"""Output useful configuration information to LOG and TRACE"""
config_file = manager.options.config_file
if config_file:
config_file = os.path.abspath(config_file)
manager.log()
configuration_vars = [
("Mypy Version", __version__),
("Config File", (config_file or "Default")),
("Configured Executable", manager.options.python_executable or "None"),
("Current Executable", sys.executable),
("Cache Dir", manager.options.cache_dir),
("Compiled", str(not __file__.endswith(".py"))),
("Exclude", manager.options.exclude),
]
for conf_name, conf_value in configuration_vars:
manager.log(f"{conf_name + ':':24}{conf_value}")
for source in sources:
manager.log(f"{'Found source:':24}{source}")
# Complete list of searched paths can get very long, put them under TRACE
for path_type, paths in manager.search_paths._asdict().items():
if not paths:
manager.trace(f"No {path_type}")
continue
manager.trace(f"{path_type}:")
for pth in paths:
manager.trace(f" {pth}")
# The driver
def dispatch(sources: list[BuildSource], manager: BuildManager, stdout: TextIO) -> Graph:
log_configuration(manager, sources)
t0 = time.time()
graph = load_graph(sources, manager)
# This is a kind of unfortunate hack to work around some of fine-grained's
# fragility: if we have loaded less than 50% of the specified files from
# cache in fine-grained cache mode, load the graph again honestly.
    # In this case, we just turn the cache off entirely, so we don't need
    # to worry about some files coming from source and some from cache, and
    # so that fine-grained mode never *writes* to the cache.
if manager.use_fine_grained_cache() and len(graph) < 0.50 * len(sources):
manager.log("Redoing load_graph without cache because too much was missing")
manager.cache_enabled = False
graph = load_graph(sources, manager)
t1 = time.time()
manager.add_stats(
graph_size=len(graph),
stubs_found=sum(g.path is not None and g.path.endswith(".pyi") for g in graph.values()),
graph_load_time=(t1 - t0),
fm_cache_size=len(manager.find_module_cache.results),
)
if not graph:
print("Nothing to do?!", file=stdout)
return graph
manager.log(f"Loaded graph with {len(graph)} nodes ({t1 - t0:.3f} sec)")
if manager.options.dump_graph:
dump_graph(graph, stdout)
return graph
# Fine grained dependencies that didn't have an associated module in the build
# are serialized separately, so we read them after we load the graph.
# We need to read them both for running in daemon mode and if we are generating
# a fine-grained cache (so that we can properly update them incrementally).
# The `read_deps_cache` will also validate
# the deps cache against the loaded individual cache files.
if manager.options.cache_fine_grained or manager.use_fine_grained_cache():
t2 = time.time()
fg_deps_meta = read_deps_cache(manager, graph)
manager.add_stats(load_fg_deps_time=time.time() - t2)
if fg_deps_meta is not None:
manager.fg_deps_meta = fg_deps_meta
elif manager.stats.get("fresh_metas", 0) > 0:
            # Clear the stats so we don't loop forever because of positive fresh_metas
manager.stats.clear()
# There were some cache files read, but no fine-grained dependencies loaded.
manager.log("Error reading fine-grained dependencies cache -- aborting cache load")
manager.cache_enabled = False
manager.log("Falling back to full run -- reloading graph...")
return dispatch(sources, manager, stdout)
# If we are loading a fine-grained incremental mode cache, we
# don't want to do a real incremental reprocess of the
# graph---we'll handle it all later.
if not manager.use_fine_grained_cache():
process_graph(graph, manager)
# Update plugins snapshot.
write_plugins_snapshot(manager)
manager.old_plugins_snapshot = manager.plugins_snapshot
if manager.options.cache_fine_grained or manager.options.fine_grained_incremental:
# If we are running a daemon or are going to write cache for further fine grained use,
# then we need to collect fine grained protocol dependencies.
# Since these are a global property of the program, they are calculated after we
# processed the whole graph.
type_state.add_all_protocol_deps(manager.fg_deps)
if not manager.options.fine_grained_incremental:
rdeps = generate_deps_for_cache(manager, graph)
write_deps_cache(rdeps, manager, graph)
if manager.options.dump_deps:
# This speeds up startup a little when not using the daemon mode.
from mypy.server.deps import dump_all_dependencies
dump_all_dependencies(
manager.modules, manager.all_types, manager.options.python_version, manager.options
)
return graph
class NodeInfo:
"""Some info about a node in the graph of SCCs."""
def __init__(self, index: int, scc: list[str]) -> None:
self.node_id = "n%d" % index
self.scc = scc
self.sizes: dict[str, int] = {} # mod -> size in bytes
self.deps: dict[str, int] = {} # node_id -> pri
def dumps(self) -> str:
"""Convert to JSON string."""
total_size = sum(self.sizes.values())
return "[{}, {}, {},\n {},\n {}]".format(
json.dumps(self.node_id),
json.dumps(total_size),
json.dumps(self.scc),
json.dumps(self.sizes),
json.dumps(self.deps),
)
def dump_timing_stats(path: str, graph: Graph) -> None:
"""Dump timing stats for each file in the given graph."""
with open(path, "w") as f:
for id in sorted(graph):
f.write(f"{id} {graph[id].time_spent_us}\n")
def dump_line_checking_stats(path: str, graph: Graph) -> None:
"""Dump per-line expression type checking stats."""
with open(path, "w") as f:
for id in sorted(graph):
if not graph[id].per_line_checking_time_ns:
continue
f.write(f"{id}:\n")
for line in sorted(graph[id].per_line_checking_time_ns):
line_time = graph[id].per_line_checking_time_ns[line]
f.write(f"{line:>5} {line_time/1000:8.1f}\n")
def dump_graph(graph: Graph, stdout: TextIO | None = None) -> None:
"""Dump the graph as a JSON string to stdout.
This copies some of the work by process_graph()
(sorted_components() and order_ascc()).
"""
stdout = stdout or sys.stdout
nodes = []
sccs = sorted_components(graph)
for i, ascc in enumerate(sccs):
scc = order_ascc(graph, ascc)
node = NodeInfo(i, scc)
nodes.append(node)
inv_nodes = {} # module -> node_id
for node in nodes:
for mod in node.scc:
inv_nodes[mod] = node.node_id
for node in nodes:
for mod in node.scc:
state = graph[mod]
size = 0
if state.path:
try:
size = os.path.getsize(state.path)
except OSError:
pass
node.sizes[mod] = size
for dep in state.dependencies:
if dep in state.priorities:
pri = state.priorities[dep]
if dep in inv_nodes:
dep_id = inv_nodes[dep]
if dep_id != node.node_id and (
dep_id not in node.deps or pri < node.deps[dep_id]
):
node.deps[dep_id] = pri
print("[" + ",\n ".join(node.dumps() for node in nodes) + "\n]", file=stdout)
def load_graph(
sources: list[BuildSource],
manager: BuildManager,
old_graph: Graph | None = None,
new_modules: list[State] | None = None,
) -> Graph:
"""Given some source files, load the full dependency graph.
If an old_graph is passed in, it is used as the starting point and
modified during graph loading.
If a new_modules is passed in, any modules that are loaded are
added to the list. This is an argument and not a return value
so that the caller can access it even if load_graph fails.
As this may need to parse files, this can raise CompileError in case
there are syntax errors.
"""
graph: Graph = old_graph if old_graph is not None else {}
# The deque is used to implement breadth-first traversal.
# TODO: Consider whether to go depth-first instead. This may
# affect the order in which we process files within import cycles.
new = new_modules if new_modules is not None else []
entry_points: set[str] = set()
# Seed the graph with the initial root sources.
for bs in sources:
try:
st = State(
id=bs.module,
path=bs.path,
source=bs.text,
manager=manager,
root_source=not bs.followed,
)
except ModuleNotFound:
continue
if st.id in graph:
manager.errors.set_file(st.xpath, st.id, manager.options)
manager.errors.report(
-1,
-1,
f'Duplicate module named "{st.id}" (also at "{graph[st.id].xpath}")',
blocker=True,
)
manager.errors.report(
-1,
-1,
"See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules "
"for more info",
severity="note",
)
manager.errors.report(
-1,
-1,
"Common resolutions include: a) using `--exclude` to avoid checking one of them, "
"b) adding `__init__.py` somewhere, c) using `--explicit-package-bases` or "
"adjusting MYPYPATH",
severity="note",
)
manager.errors.raise_error()
graph[st.id] = st
new.append(st)
entry_points.add(bs.module)
# Note: Running this each time could be slow in the daemon. If it's a problem, we
# can do more work to maintain this incrementally.
seen_files = {st.abspath: st for st in graph.values() if st.path}
# Collect dependencies. We go breadth-first.
# More nodes might get added to new as we go, but that's fine.
for st in new:
assert st.ancestors is not None
# Strip out indirect dependencies. These will be dealt with
# when they show up as direct dependencies, and there's a
# scenario where they hurt:
# - Suppose A imports B and B imports C.
# - Suppose on the next round:
# - C is deleted;
# - B is updated to remove the dependency on C;
# - A is unchanged.
# - In this case A's cached *direct* dependencies are still valid
# (since direct dependencies reflect the imports found in the source)
# but A's cached *indirect* dependency on C is wrong.
dependencies = [dep for dep in st.dependencies if st.priorities.get(dep) != PRI_INDIRECT]
if not manager.use_fine_grained_cache():
            # TODO: Ideally we could skip modules here that appeared in st.suppressed
            # because they are not in the build with `follow-imports=skip`.
            # This way we could avoid the overhead of cloning options in `State.__init__()`
            # below to get the option value. This is a fairly minor performance loss, however.
added = [dep for dep in st.suppressed if find_module_simple(dep, manager)]
else:
# During initial loading we don't care about newly added modules,
# they will be taken care of during fine grained update. See also
# comment about this in `State.__init__()`.
added = []
for dep in st.ancestors + dependencies + st.suppressed:
ignored = dep in st.suppressed_set and dep not in entry_points
if ignored and dep not in added:
manager.missing_modules.add(dep)
elif dep not in graph:
try:
if dep in st.ancestors:
# TODO: Why not 'if dep not in st.dependencies' ?
# Ancestors don't have import context.
newst = State(
id=dep, path=None, source=None, manager=manager, ancestor_for=st
)
else:
newst = State(
id=dep,
path=None,
source=None,
manager=manager,
caller_state=st,
caller_line=st.dep_line_map.get(dep, 1),
)
except ModuleNotFound:
if dep in st.dependencies_set:
st.suppress_dependency(dep)
else:
if newst.path:
newst_path = os.path.abspath(newst.path)
if newst_path in seen_files:
manager.errors.report(
-1,
0,
"Source file found twice under different module names: "
'"{}" and "{}"'.format(seen_files[newst_path].id, newst.id),
blocker=True,
)
manager.errors.report(
-1,
0,
"See https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules "
"for more info",
severity="note",
)
manager.errors.report(
-1,
0,
"Common resolutions include: a) adding `__init__.py` somewhere, "
"b) using `--explicit-package-bases` or adjusting MYPYPATH",
severity="note",
)
manager.errors.raise_error()
seen_files[newst_path] = newst
assert newst.id not in graph, newst.id
graph[newst.id] = newst
new.append(newst)
if dep in graph and dep in st.suppressed_set:
# Previously suppressed file is now visible
st.add_dependency(dep)
manager.plugin.set_modules(manager.modules)
return graph
def process_graph(graph: Graph, manager: BuildManager) -> None:
"""Process everything in dependency order."""
sccs = sorted_components(graph)
manager.log("Found %d SCCs; largest has %d nodes" % (len(sccs), max(len(scc) for scc in sccs)))
fresh_scc_queue: list[list[str]] = []
# We're processing SCCs from leaves (those without further
# dependencies) to roots (those from which everything else can be
# reached).
for ascc in sccs:
# Order the SCC's nodes using a heuristic.
# Note that ascc is a set, and scc is a list.
scc = order_ascc(graph, ascc)
# Make the order of the SCC that includes 'builtins' and 'typing',
# among other things, predictable. Various things may break if
# the order changes.
if "builtins" in ascc:
scc = sorted(scc, reverse=True)
# If builtins is in the list, move it last. (This is a bit of
# a hack, but it's necessary because the builtins module is
# part of a small cycle involving at least {builtins, abc,
# typing}. Of these, builtins must be processed last or else
# some builtin objects will be incompletely processed.)
scc.remove("builtins")
scc.append("builtins")
if manager.options.verbosity >= 2:
for id in scc:
manager.trace(
f"Priorities for {id}:",
" ".join(
"%s:%d" % (x, graph[id].priorities[x])
for x in graph[id].dependencies
if x in ascc and x in graph[id].priorities
),
)
# Because the SCCs are presented in topological sort order, we
# don't need to look at dependencies recursively for staleness
# -- the immediate dependencies are sufficient.
stale_scc = {id for id in scc if not graph[id].is_fresh()}
fresh = not stale_scc
deps = set()
for id in scc:
deps.update(graph[id].dependencies)
deps -= ascc
stale_deps = {id for id in deps if id in graph and not graph[id].is_interface_fresh()}
fresh = fresh and not stale_deps
undeps = set()
if fresh:
# Check if any dependencies that were suppressed according
# to the cache have been added back in this run.
# NOTE: Newly suppressed dependencies are handled by is_fresh().
for id in scc:
undeps.update(graph[id].suppressed)
undeps &= graph.keys()
if undeps:
fresh = False
if fresh:
# All cache files are fresh. Check that no dependency's
# cache file is newer than any scc node's cache file.
oldest_in_scc = min(graph[id].xmeta.data_mtime for id in scc)
viable = {id for id in stale_deps if graph[id].meta is not None}
newest_in_deps = (
0 if not viable else max(graph[dep].xmeta.data_mtime for dep in viable)
)
if manager.options.verbosity >= 3: # Dump all mtimes for extreme debugging.
all_ids = sorted(ascc | viable, key=lambda id: graph[id].xmeta.data_mtime)
for id in all_ids:
if id in scc:
if graph[id].xmeta.data_mtime < newest_in_deps:
key = "*id:"
else:
key = "id:"
else:
if graph[id].xmeta.data_mtime > oldest_in_scc:
key = "+dep:"
else:
key = "dep:"
manager.trace(" %5s %.0f %s" % (key, graph[id].xmeta.data_mtime, id))
# If equal, give the benefit of the doubt, due to 1-sec time granularity
# (on some platforms).
if oldest_in_scc < newest_in_deps:
fresh = False
fresh_msg = f"out of date by {newest_in_deps - oldest_in_scc:.0f} seconds"
else:
fresh_msg = "fresh"
elif undeps:
fresh_msg = f"stale due to changed suppression ({' '.join(sorted(undeps))})"
elif stale_scc:
fresh_msg = "inherently stale"
if stale_scc != ascc:
fresh_msg += f" ({' '.join(sorted(stale_scc))})"
if stale_deps:
fresh_msg += f" with stale deps ({' '.join(sorted(stale_deps))})"
else:
fresh_msg = f"stale due to deps ({' '.join(sorted(stale_deps))})"
# Initialize transitive_error for all SCC members from union
# of transitive_error of dependencies.
if any(graph[dep].transitive_error for dep in deps if dep in graph):
for id in scc:
graph[id].transitive_error = True
scc_str = " ".join(scc)
if fresh:
manager.trace(f"Queuing {fresh_msg} SCC ({scc_str})")
fresh_scc_queue.append(scc)
else:
if fresh_scc_queue:
manager.log(f"Processing {len(fresh_scc_queue)} queued fresh SCCs")
# Defer processing fresh SCCs until we actually run into a stale SCC
# and need the earlier modules to be loaded.
#
# Note that `process_graph` may end with us not having processed every
# single fresh SCC. This is intentional -- we don't need those modules
# loaded if there are no more stale SCCs to be rechecked.
#
# Also note we shouldn't have to worry about transitive_error here,
# since modules with transitive errors aren't written to the cache,
# and if any dependencies were changed, this SCC would be stale.
# (Also, in quick_and_dirty mode we don't care about transitive errors.)
#
# TODO: see if it's possible to determine if we need to process only a
# _subset_ of the past SCCs instead of having to process them all.
for prev_scc in fresh_scc_queue:
process_fresh_modules(graph, prev_scc, manager)
fresh_scc_queue = []
size = len(scc)
if size == 1:
manager.log(f"Processing SCC singleton ({scc_str}) as {fresh_msg}")
else:
manager.log("Processing SCC of size %d (%s) as %s" % (size, scc_str, fresh_msg))
process_stale_scc(graph, scc, manager)
sccs_left = len(fresh_scc_queue)
nodes_left = sum(len(scc) for scc in fresh_scc_queue)
manager.add_stats(sccs_left=sccs_left, nodes_left=nodes_left)
if sccs_left:
manager.log(
"{} fresh SCCs ({} nodes) left in queue (and will remain unprocessed)".format(
sccs_left, nodes_left
)
)
manager.trace(str(fresh_scc_queue))
else:
manager.log("No fresh SCCs left in queue")
def order_ascc(graph: Graph, ascc: AbstractSet[str], pri_max: int = PRI_ALL) -> list[str]:
"""Come up with the ideal processing order within an SCC.
Using the priorities assigned by all_imported_modules_in_file(),
try to reduce the cycle to a DAG, by omitting arcs representing
dependencies of lower priority.
In the simplest case, if we have A <--> B where A has a top-level
"import B" (medium priority) but B only has the reverse "import A"
inside a function (low priority), we turn the cycle into a DAG by
dropping the B --> A arc, which leaves only A --> B.
If all arcs have the same priority, we fall back to sorting by
reverse global order (the order in which modules were first
encountered).
    The algorithm is recursive, as follows: when arcs of different
priorities are present, drop all arcs of the lowest priority,
identify SCCs in the resulting graph, and apply the algorithm to
each SCC thus found. The recursion is bounded because at each
recursion the spread in priorities is (at least) one less.
In practice there are only a few priority levels (less than a
dozen) and in the worst case we just carry out the same algorithm
for finding SCCs N times. Thus the complexity is no worse than
the complexity of the original SCC-finding algorithm -- see
strongly_connected_components() below for a reference.
"""
if len(ascc) == 1:
return list(ascc)
pri_spread = set()
for id in ascc:
state = graph[id]
for dep in state.dependencies:
if dep in ascc:
pri = state.priorities.get(dep, PRI_HIGH)
if pri < pri_max:
pri_spread.add(pri)
if len(pri_spread) == 1:
# Filtered dependencies are uniform -- order by global order.
return sorted(ascc, key=lambda id: -graph[id].order)
pri_max = max(pri_spread)
sccs = sorted_components(graph, ascc, pri_max)
# The recursion is bounded by the len(pri_spread) check above.
return [s for ss in sccs for s in order_ascc(graph, ss, pri_max)]
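# Illustrative sketch (not part of mypy): order_ascc() turns a cycle into a DAG
# by dropping the arcs of the numerically largest (i.e. least important) priority
# and recursing on the resulting SCCs. The toy below performs one such pruning
# step on a plain dict graph; the helper name and data shapes are hypothetical.
def _example_drop_lowest_priority_arcs(
    edges: dict[str, dict[str, int]]
) -> dict[str, dict[str, int]]:
    all_pris = [pri for deps in edges.values() for pri in deps.values()]
    if not all_pris:
        return edges
    worst = max(all_pris)  # larger number == lower priority
    return {
        node: {dep: pri for dep, pri in deps.items() if pri < worst}
        for node, deps in edges.items()
    }
# A <--> B with a top-level import (priority 5) one way and a function-local
# import (priority 20) the other way reduces to the DAG A -> B:
# _example_drop_lowest_priority_arcs({"A": {"B": 5}, "B": {"A": 20}})
# == {"A": {"B": 5}, "B": {}}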
def process_fresh_modules(graph: Graph, modules: list[str], manager: BuildManager) -> None:
"""Process the modules in one group of modules from their cached data.
    This can be used to process an SCC of modules.
This involves loading the tree from JSON and then doing various cleanups.
"""
t0 = time.time()
for id in modules:
graph[id].load_tree()
t1 = time.time()
for id in modules:
graph[id].fix_cross_refs()
t2 = time.time()
manager.add_stats(process_fresh_time=t2 - t0, load_tree_time=t1 - t0)
def process_stale_scc(graph: Graph, scc: list[str], manager: BuildManager) -> None:
"""Process the modules in one SCC from source code.
Exception: If quick_and_dirty is set, use the cache for fresh modules.
"""
stale = scc
for id in stale:
# We may already have parsed the module, or not.
# If the former, parse_file() is a no-op.
graph[id].parse_file()
if "typing" in scc:
# For historical reasons we need to manually add typing aliases
# for built-in generic collections, see docstring of
# SemanticAnalyzerPass2.add_builtin_aliases for details.
typing_mod = graph["typing"].tree
assert typing_mod, "The typing module was not parsed"
mypy.semanal_main.semantic_analysis_for_scc(graph, scc, manager.errors)
# Track what modules aren't yet done so we can finish them as soon
# as possible, saving memory.
unfinished_modules = set(stale)
for id in stale:
graph[id].type_check_first_pass()
if not graph[id].type_checker().deferred_nodes:
unfinished_modules.discard(id)
graph[id].detect_possibly_undefined_vars()
graph[id].finish_passes()
while unfinished_modules:
for id in stale:
if id not in unfinished_modules:
continue
if not graph[id].type_check_second_pass():
unfinished_modules.discard(id)
graph[id].detect_possibly_undefined_vars()
graph[id].finish_passes()
for id in stale:
graph[id].generate_unused_ignore_notes()
graph[id].generate_ignore_without_code_notes()
if any(manager.errors.is_errors_for_file(graph[id].xpath) for id in stale):
for id in stale:
graph[id].transitive_error = True
for id in stale:
if graph[id].xpath not in manager.errors.ignored_files:
errors = manager.errors.file_messages(
graph[id].xpath, formatter=manager.error_formatter
)
manager.flush_errors(manager.errors.simplify_path(graph[id].xpath), errors, False)
graph[id].write_cache()
graph[id].mark_as_rechecked()
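# Illustrative sketch (not part of mypy): the second-pass loop above is a classic
# worklist fixed point -- keep re-running a pass over the not-yet-finished items
# until an iteration leaves nothing deferred. The self-contained toy below drives
# such a loop with a simple "remaining passes" counter; all names and data are
# hypothetical.
def _example_run_until_settled(remaining_passes: dict[str, int]) -> list[str]:
    log: list[str] = []
    unfinished = set(remaining_passes)
    while unfinished:
        for mod in sorted(unfinished):
            remaining_passes[mod] -= 1
            log.append(f"pass over {mod}")
            if remaining_passes[mod] <= 0:
                unfinished.discard(mod)  # nothing was deferred this time
    return log
# _example_run_until_settled({"a": 1, "b": 2}) makes one extra pass over "b" only.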
def sorted_components(
graph: Graph, vertices: AbstractSet[str] | None = None, pri_max: int = PRI_ALL
) -> list[AbstractSet[str]]:
"""Return the graph's SCCs, topologically sorted by dependencies.
The sort order is from leaves (nodes without dependencies) to
roots (nodes on which no other nodes depend).
This works for a subset of the full dependency graph too;
dependencies that aren't present in graph.keys() are ignored.
"""
# Compute SCCs.
if vertices is None:
vertices = set(graph)
edges = {id: deps_filtered(graph, vertices, id, pri_max) for id in vertices}
sccs = list(strongly_connected_components(vertices, edges))
# Topsort.
res = []
for ready in topsort(prepare_sccs(sccs, edges)):
# Sort the sets in ready by reversed smallest State.order. Examples:
#
# - If ready is [{x}, {y}], x.order == 1, y.order == 2, we get
# [{y}, {x}].
#
# - If ready is [{a, b}, {c, d}], a.order == 1, b.order == 3,
# c.order == 2, d.order == 4, the sort keys become [1, 2]
# and the result is [{c, d}, {a, b}].
res.extend(sorted(ready, key=lambda scc: -min(graph[id].order for id in scc)))
return res
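# Illustrative sketch (not part of mypy): the leaves-to-roots ordering described
# above is a plain topological sort in which a node becomes "ready" once all of
# its dependencies have been emitted. The helper below does this for an acyclic
# dict graph (i.e. the condensation of SCCs); all names are hypothetical.
def _example_topsort_leaves_first(deps: dict[str, set[str]]) -> list[str]:
    emitted: set[str] = set()
    order: list[str] = []
    while len(order) < len(deps):
        ready = sorted(n for n in deps if n not in emitted and deps[n] <= emitted)
        if not ready:
            raise ValueError("dependency cycle")  # can't happen for an SCC condensation
        order.extend(ready)
        emitted.update(ready)
    return order
# _example_topsort_leaves_first({"app": {"lib"}, "lib": {"builtins"}, "builtins": set()})
# == ["builtins", "lib", "app"]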
def deps_filtered(graph: Graph, vertices: AbstractSet[str], id: str, pri_max: int) -> list[str]:
"""Filter dependencies for id with pri < pri_max."""
if id not in vertices:
return []
state = graph[id]
return [
dep
for dep in state.dependencies
if dep in vertices and state.priorities.get(dep, PRI_HIGH) < pri_max
]
def missing_stubs_file(cache_dir: str) -> str:
return os.path.join(cache_dir, "missing_stubs")
def record_missing_stub_packages(cache_dir: str, missing_stub_packages: set[str]) -> None:
"""Write a file containing missing stub packages.
This allows a subsequent "mypy --install-types" run (without other arguments)
to install missing stub packages.
"""
fnam = missing_stubs_file(cache_dir)
if missing_stub_packages:
with open(fnam, "w") as f:
for pkg in sorted(missing_stub_packages):
f.write(f"{pkg}\n")
else:
if os.path.isfile(fnam):
os.remove(fnam)
def is_silent_import_module(manager: BuildManager, path: str) -> bool:
if manager.options.no_silence_site_packages:
return False
# Silence errors in site-package dirs and typeshed
if any(is_sub_path_normabs(path, dir) for dir in manager.search_paths.package_path):
return True
return any(is_sub_path_normabs(path, dir) for dir in manager.search_paths.typeshed_path)
def write_undocumented_ref_info(
state: State, metastore: MetadataStore, options: Options, type_map: dict[Expression, Type]
) -> None:
# This exports some dependency information in a rather ad-hoc fashion, which
# can be helpful for some tools. This is all highly experimental and could be
# removed at any time.
from mypy.refinfo import get_undocumented_ref_info_json
if not state.tree:
# We need a full AST for this.
return
_, data_file, _ = get_cache_names(state.id, state.xpath, options)
ref_info_file = ".".join(data_file.split(".")[:-2]) + ".refs.json"
assert not ref_info_file.startswith(".")
deps_json = get_undocumented_ref_info_json(state.tree, type_map)
metastore.write(ref_info_file, json_dumps(deps_json))
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/build.py
|
Python
|
NOASSERTION
| 144,775 |
"""Mypy type checker."""
from __future__ import annotations
import itertools
from collections import defaultdict
from contextlib import ExitStack, contextmanager
from typing import (
AbstractSet,
Callable,
Dict,
Final,
Generic,
Iterable,
Iterator,
Mapping,
NamedTuple,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import TypeAlias as _TypeAlias
import mypy.checkexpr
from mypy import errorcodes as codes, join, message_registry, nodes, operators
from mypy.binder import ConditionalTypeBinder, Frame, get_declaration
from mypy.checkmember import (
MemberContext,
analyze_decorator_or_funcbase_access,
analyze_descriptor_access,
analyze_member_access,
type_object_type,
)
from mypy.checkpattern import PatternChecker
from mypy.constraints import SUPERTYPE_OF
from mypy.erasetype import erase_type, erase_typevars, remove_instance_last_known_values
from mypy.errorcodes import TYPE_VAR, UNUSED_AWAITABLE, UNUSED_COROUTINE, ErrorCode
from mypy.errors import Errors, ErrorWatcher, report_internal_error
from mypy.expandtype import expand_self_type, expand_type, expand_type_by_instance
from mypy.literals import Key, extract_var_from_literal_hash, literal, literal_hash
from mypy.maptype import map_instance_to_supertype
from mypy.meet import is_overlapping_erased_types, is_overlapping_types, meet_types
from mypy.message_registry import ErrorMessage
from mypy.messages import (
SUGGESTED_TEST_FIXTURES,
MessageBuilder,
append_invariance_notes,
append_union_note,
format_type,
format_type_bare,
format_type_distinctly,
make_inferred_type_note,
pretty_seq,
)
from mypy.mro import MroError, calculate_mro
from mypy.nodes import (
ARG_NAMED,
ARG_POS,
ARG_STAR,
CONTRAVARIANT,
COVARIANT,
FUNC_NO_INFO,
GDEF,
IMPLICITLY_ABSTRACT,
INVARIANT,
IS_ABSTRACT,
LDEF,
LITERAL_TYPE,
MDEF,
NOT_ABSTRACT,
AssertStmt,
AssignmentExpr,
AssignmentStmt,
Block,
BreakStmt,
BytesExpr,
CallExpr,
ClassDef,
ComparisonExpr,
Context,
ContinueStmt,
Decorator,
DelStmt,
EllipsisExpr,
Expression,
ExpressionStmt,
FloatExpr,
ForStmt,
FuncBase,
FuncDef,
FuncItem,
IfStmt,
Import,
ImportAll,
ImportBase,
ImportFrom,
IndexExpr,
IntExpr,
LambdaExpr,
ListExpr,
Lvalue,
MatchStmt,
MemberExpr,
MypyFile,
NameExpr,
Node,
OperatorAssignmentStmt,
OpExpr,
OverloadedFuncDef,
PassStmt,
PromoteExpr,
RaiseStmt,
RefExpr,
ReturnStmt,
StarExpr,
Statement,
StrExpr,
SymbolNode,
SymbolTable,
SymbolTableNode,
TempNode,
TryStmt,
TupleExpr,
TypeAlias,
TypeAliasStmt,
TypeInfo,
TypeVarExpr,
UnaryExpr,
Var,
WhileStmt,
WithStmt,
YieldExpr,
is_final_node,
)
from mypy.operators import flip_ops, int_op_to_method, neg_ops
from mypy.options import PRECISE_TUPLE_TYPES, Options
from mypy.patterns import AsPattern, StarredPattern
from mypy.plugin import CheckerPluginInterface, Plugin
from mypy.plugins import dataclasses as dataclasses_plugin
from mypy.scope import Scope
from mypy.semanal import is_trivial_body, refers_to_fullname, set_callable_name
from mypy.semanal_enum import ENUM_BASES, ENUM_SPECIAL_PROPS
from mypy.sharedparse import BINARY_MAGIC_METHODS
from mypy.state import state
from mypy.subtypes import (
find_member,
infer_class_variances,
is_callable_compatible,
is_equivalent,
is_more_precise,
is_proper_subtype,
is_same_type,
is_subtype,
restrict_subtype_away,
unify_generic_callable,
)
from mypy.traverser import TraverserVisitor, all_return_statements, has_return_statement
from mypy.treetransform import TransformVisitor
from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type, make_optional_type
from mypy.typeops import (
bind_self,
coerce_to_literal,
custom_special_method,
erase_def_to_union_or_bound,
erase_to_bound,
erase_to_union_or_bound,
false_only,
fixup_partial_type,
function_type,
is_literal_type_like,
is_singleton_type,
make_simplified_union,
map_type_from_supertype,
true_only,
try_expanding_sum_type_to_union,
try_getting_int_literals_from_type,
try_getting_str_literals,
try_getting_str_literals_from_type,
tuple_fallback,
)
from mypy.types import (
ANY_STRATEGY,
MYPYC_NATIVE_INT_NAMES,
OVERLOAD_NAMES,
AnyType,
BoolTypeQuery,
CallableType,
DeletedType,
ErasedType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeGuardedType,
TypeOfAny,
TypeTranslator,
TypeType,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
find_unpack_in_list,
flatten_nested_unions,
get_proper_type,
get_proper_types,
is_literal_type,
is_named_instance,
)
from mypy.types_utils import is_overlapping_none, remove_optional, store_argument_type, strip_type
from mypy.typetraverser import TypeTraverserVisitor
from mypy.typevars import fill_typevars, fill_typevars_with_any, has_no_typevars
from mypy.util import is_dunder, is_sunder
from mypy.visitor import NodeVisitor
T = TypeVar("T")
DEFAULT_LAST_PASS: Final = 1 # Pass numbers start at 0
# Maximum length of fixed tuple types inferred when narrowing from variadic tuples.
MAX_PRECISE_TUPLE_SIZE: Final = 8
DeferredNodeType: _TypeAlias = Union[FuncDef, LambdaExpr, OverloadedFuncDef, Decorator]
FineGrainedDeferredNodeType: _TypeAlias = Union[FuncDef, MypyFile, OverloadedFuncDef]
# A node which is postponed to be processed during the next pass.
# In normal mode one can defer functions and methods (also decorated and/or overloaded)
# and lambda expressions. Nested functions can't be deferred -- only top-level functions
# and methods of classes not defined within a function can be deferred.
class DeferredNode(NamedTuple):
node: DeferredNodeType
    # And its TypeInfo (for semantic analysis self type handling)
active_typeinfo: TypeInfo | None
# Same as above, but for fine-grained mode targets. Only top-level functions/methods
# and module top levels are allowed as such.
class FineGrainedDeferredNode(NamedTuple):
node: FineGrainedDeferredNodeType
active_typeinfo: TypeInfo | None
# Data structure returned by find_isinstance_check representing
# information learned from the truth or falsehood of a condition. The
# dict maps nodes representing expressions like 'a[0].x' to their
# refined types under the assumption that the condition has a
# particular truth value. A value of None means that the condition can
# never have that truth value.
# NB: The keys of this dict are nodes in the original source program,
# which are compared by reference equality--effectively, being *the
# same* expression of the program, not just two identical expressions
# (such as two references to the same variable). TODO: it would
# probably be better to have the dict keyed by the nodes' literal_hash
# field instead.
TypeMap: _TypeAlias = Optional[Dict[Expression, Type]]
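# Illustrative sketch (not part of mypy): the comment above stresses that TypeMap
# keys are compared by reference equality, not structural equality. A plain dict
# keyed by objects without __eq__/__hash__ overrides behaves the same way -- two
# distinct-but-identical expression nodes get separate entries. The class and
# values below are hypothetical stand-ins for mypy expression nodes and types.
def _example_identity_keyed_map() -> None:
    class FakeExpr:  # stand-in for a mypy Expression node
        def __init__(self, text: str) -> None:
            self.text = text

    a = FakeExpr("x.y")
    b = FakeExpr("x.y")  # same source text, different node
    narrowed: dict[object, str] = {a: "int", b: "str"}
    assert narrowed[a] == "int" and narrowed[b] == "str"  # keyed by identity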
# An object that represents either a precise type or a type with an upper bound;
# it is important for correct type inference with isinstance.
class TypeRange(NamedTuple):
item: Type
is_upper_bound: bool # False => precise type
# Keeps track of partial types in a single scope. In fine-grained incremental
# mode partial types initially defined at the top level cannot be completed in
# a function, and we use the 'is_function' attribute to enforce this.
class PartialTypeScope(NamedTuple):
map: dict[Var, Context]
is_function: bool
is_local: bool
class TypeChecker(NodeVisitor[None], CheckerPluginInterface):
"""Mypy type checker.
Type check mypy source files that have been semantically analyzed.
You must create a separate instance for each source file.
"""
# Are we type checking a stub?
is_stub = False
# Error message reporter
errors: Errors
# Utility for generating messages
msg: MessageBuilder
# Types of type checked nodes. The first item is the "master" type
# map that will store the final, exported types. Additional items
# are temporary type maps used during type inference, and these
# will be eventually popped and either discarded or merged into
# the master type map.
#
# Avoid accessing this directly, but prefer the lookup_type(),
# has_type() etc. helpers instead.
_type_maps: list[dict[Expression, Type]]
# Helper for managing conditional types
binder: ConditionalTypeBinder
# Helper for type checking expressions
expr_checker: mypy.checkexpr.ExpressionChecker
pattern_checker: PatternChecker
tscope: Scope
scope: CheckerScope
# Stack of function return types
return_types: list[Type]
# Flags; true for dynamically typed functions
dynamic_funcs: list[bool]
# Stack of collections of variables with partial types
partial_types: list[PartialTypeScope]
# Vars for which partial type errors are already reported
# (to avoid logically duplicate errors with different error context).
partial_reported: set[Var]
globals: SymbolTable
modules: dict[str, MypyFile]
# Nodes that couldn't be checked because some types weren't available. We'll run
# another pass and try these again.
deferred_nodes: list[DeferredNode]
# Type checking pass number (0 = first pass)
pass_num = 0
# Last pass number to take
last_pass = DEFAULT_LAST_PASS
# Have we deferred the current function? If yes, don't infer additional
# types during this pass within the function.
current_node_deferred = False
# Is this file a typeshed stub?
is_typeshed_stub = False
options: Options
# Used for collecting inferred attribute types so that they can be checked
# for consistency.
inferred_attribute_types: dict[Var, Type] | None = None
# Don't infer partial None types if we are processing assignment from Union
no_partial_types: bool = False
# The set of all dependencies (suppressed or not) that this module accesses, either
# directly or indirectly.
module_refs: set[str]
# A map from variable nodes to a snapshot of the frame ids of the
# frames that were active when the variable was declared. This can
# be used to determine nearest common ancestor frame of a variable's
# declaration and the current frame, which lets us determine if it
# was declared in a different branch of the same `if` statement
# (if that frame is a conditional_frame).
var_decl_frames: dict[Var, set[int]]
# Plugin that provides special type checking rules for specific library
# functions such as open(), etc.
plugin: Plugin
def __init__(
self,
errors: Errors,
modules: dict[str, MypyFile],
options: Options,
tree: MypyFile,
path: str,
plugin: Plugin,
per_line_checking_time_ns: dict[int, int],
) -> None:
"""Construct a type checker.
Use errors to report type check errors.
"""
self.errors = errors
self.modules = modules
self.options = options
self.tree = tree
self.path = path
self.msg = MessageBuilder(errors, modules)
self.plugin = plugin
self.tscope = Scope()
self.scope = CheckerScope(tree)
self.binder = ConditionalTypeBinder()
self.globals = tree.names
self.return_types = []
self.dynamic_funcs = []
self.partial_types = []
self.partial_reported = set()
self.var_decl_frames = {}
self.deferred_nodes = []
self._type_maps = [{}]
self.module_refs = set()
self.pass_num = 0
self.current_node_deferred = False
self.is_stub = tree.is_stub
self.is_typeshed_stub = tree.is_typeshed_file(options)
self.inferred_attribute_types = None
# If True, process function definitions. If False, don't. This is used
# for processing module top levels in fine-grained incremental mode.
self.recurse_into_functions = True
        # This internal flag is used to track whether we are currently type-checking
# a final declaration (assignment), so that some errors should be suppressed.
# Should not be set manually, use get_final_context/enter_final_context instead.
# NOTE: we use the context manager to avoid "threading" an additional `is_final_def`
# argument through various `checker` and `checkmember` functions.
self._is_final_def = False
# This flag is set when we run type-check or attribute access check for the purpose
# of giving a note on possibly missing "await". It is used to avoid infinite recursion.
self.checking_missing_await = False
        # While this is True, allow passing an abstract class where Type[T] is expected;
        # although this is technically unsafe, it is desirable in some contexts, for
        # example when type-checking class decorators.
self.allow_abstract_call = False
# Child checker objects for specific AST node types
self.expr_checker = mypy.checkexpr.ExpressionChecker(
self, self.msg, self.plugin, per_line_checking_time_ns
)
self.pattern_checker = PatternChecker(self, self.msg, self.plugin, options)
@property
def type_context(self) -> list[Type | None]:
return self.expr_checker.type_context
def reset(self) -> None:
"""Cleanup stale state that might be left over from a typechecking run.
This allows us to reuse TypeChecker objects in fine-grained
incremental mode.
"""
# TODO: verify this is still actually worth it over creating new checkers
self.partial_reported.clear()
self.module_refs.clear()
self.binder = ConditionalTypeBinder()
self._type_maps[1:] = []
self._type_maps[0].clear()
self.temp_type_map = None
self.expr_checker.reset()
assert self.inferred_attribute_types is None
assert self.partial_types == []
assert self.deferred_nodes == []
assert len(self.scope.stack) == 1
assert self.partial_types == []
def check_first_pass(self) -> None:
"""Type check the entire file, but defer functions with unresolved references.
Unresolved references are forward references to variables
whose types haven't been inferred yet. They may occur later
in the same file or in a different file that's being processed
later (usually due to an import cycle).
Deferred functions will be processed by check_second_pass().
"""
self.recurse_into_functions = True
with state.strict_optional_set(self.options.strict_optional):
self.errors.set_file(
self.path, self.tree.fullname, scope=self.tscope, options=self.options
)
with self.tscope.module_scope(self.tree.fullname):
with self.enter_partial_types(), self.binder.top_frame_context():
for d in self.tree.defs:
if self.binder.is_unreachable():
if not self.should_report_unreachable_issues():
break
if not self.is_noop_for_reachability(d):
self.msg.unreachable_statement(d)
break
else:
self.accept(d)
assert not self.current_node_deferred
all_ = self.globals.get("__all__")
if all_ is not None and all_.type is not None:
all_node = all_.node
assert all_node is not None
seq_str = self.named_generic_type(
"typing.Sequence", [self.named_type("builtins.str")]
)
if not is_subtype(all_.type, seq_str):
str_seq_s, all_s = format_type_distinctly(
seq_str, all_.type, options=self.options
)
self.fail(
message_registry.ALL_MUST_BE_SEQ_STR.format(str_seq_s, all_s), all_node
)
def check_second_pass(
self, todo: Sequence[DeferredNode | FineGrainedDeferredNode] | None = None
) -> bool:
"""Run second or following pass of type checking.
This goes through deferred nodes, returning True if there were any.
"""
self.recurse_into_functions = True
with state.strict_optional_set(self.options.strict_optional):
if not todo and not self.deferred_nodes:
return False
self.errors.set_file(
self.path, self.tree.fullname, scope=self.tscope, options=self.options
)
with self.tscope.module_scope(self.tree.fullname):
self.pass_num += 1
if not todo:
todo = self.deferred_nodes
else:
assert not self.deferred_nodes
self.deferred_nodes = []
done: set[DeferredNodeType | FineGrainedDeferredNodeType] = set()
for node, active_typeinfo in todo:
if node in done:
continue
# This is useful for debugging:
# print("XXX in pass %d, class %s, function %s" %
# (self.pass_num, type_name, node.fullname or node.name))
done.add(node)
with ExitStack() as stack:
if active_typeinfo:
stack.enter_context(self.tscope.class_scope(active_typeinfo))
stack.enter_context(self.scope.push_class(active_typeinfo))
self.check_partial(node)
return True
def check_partial(self, node: DeferredNodeType | FineGrainedDeferredNodeType) -> None:
if isinstance(node, MypyFile):
self.check_top_level(node)
else:
self.recurse_into_functions = True
with self.binder.top_frame_context():
if isinstance(node, LambdaExpr):
self.expr_checker.accept(node)
else:
self.accept(node)
def check_top_level(self, node: MypyFile) -> None:
"""Check only the top-level of a module, skipping function definitions."""
self.recurse_into_functions = False
with self.enter_partial_types():
with self.binder.top_frame_context():
for d in node.defs:
d.accept(self)
assert not self.current_node_deferred
# TODO: Handle __all__
def defer_node(self, node: DeferredNodeType, enclosing_class: TypeInfo | None) -> None:
"""Defer a node for processing during next type-checking pass.
Args:
node: function/method being deferred
enclosing_class: for methods, the class where the method is defined
NOTE: this can't handle nested functions/methods.
"""
# We don't freeze the entire scope since only top-level functions and methods
# can be deferred. Only module/class level scope information is needed.
# Module-level scope information is preserved in the TypeChecker instance.
self.deferred_nodes.append(DeferredNode(node, enclosing_class))
def handle_cannot_determine_type(self, name: str, context: Context) -> None:
node = self.scope.top_non_lambda_function()
if self.pass_num < self.last_pass and isinstance(node, FuncDef):
# Don't report an error yet. Just defer. Note that we don't defer
# lambdas because they are coupled to the surrounding function
# through the binder and the inferred type of the lambda, so it
# would get messy.
enclosing_class = self.scope.enclosing_class()
self.defer_node(node, enclosing_class)
# Set a marker so that we won't infer additional types in this
# function. Any inferred types could be bogus, because there's at
# least one type that we don't know.
self.current_node_deferred = True
else:
self.msg.cannot_determine_type(name, context)
def accept(self, stmt: Statement) -> None:
"""Type check a node in the given type context."""
try:
stmt.accept(self)
except Exception as err:
report_internal_error(err, self.errors.file, stmt.line, self.errors, self.options)
def accept_loop(
self,
body: Statement,
else_body: Statement | None = None,
*,
exit_condition: Expression | None = None,
) -> None:
"""Repeatedly type check a loop body until the frame doesn't change.
If exit_condition is set, assume it must be False on exit from the loop.
Then check the else_body.
"""
# The outer frame accumulates the results of all iterations
with self.binder.frame_context(can_skip=False, conditional_frame=True):
while True:
with self.binder.frame_context(can_skip=True, break_frame=2, continue_frame=1):
self.accept(body)
if not self.binder.last_pop_changed:
break
if exit_condition:
_, else_map = self.find_isinstance_check(exit_condition)
self.push_type_map(else_map)
if else_body:
self.accept(else_body)
#
# Definitions
#
def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
if not self.recurse_into_functions:
return
with self.tscope.function_scope(defn):
self._visit_overloaded_func_def(defn)
def _visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
num_abstract = 0
if not defn.items:
# In this case we have already complained about none of these being
# valid overloads.
return
if len(defn.items) == 1:
self.fail(message_registry.MULTIPLE_OVERLOADS_REQUIRED, defn)
if defn.is_property:
# HACK: Infer the type of the property.
assert isinstance(defn.items[0], Decorator)
self.visit_decorator(defn.items[0])
for fdef in defn.items:
assert isinstance(fdef, Decorator)
if defn.is_property:
self.check_func_item(fdef.func, name=fdef.func.name, allow_empty=True)
else:
# Perform full check for real overloads to infer type of all decorated
# overload variants.
self.visit_decorator_inner(fdef, allow_empty=True)
if fdef.func.abstract_status in (IS_ABSTRACT, IMPLICITLY_ABSTRACT):
num_abstract += 1
if num_abstract not in (0, len(defn.items)):
self.fail(message_registry.INCONSISTENT_ABSTRACT_OVERLOAD, defn)
if defn.impl:
defn.impl.accept(self)
if not defn.is_property:
self.check_overlapping_overloads(defn)
if defn.type is None:
item_types = []
for item in defn.items:
assert isinstance(item, Decorator)
item_type = self.extract_callable_type(item.var.type, item)
if item_type is not None:
item_types.append(item_type)
if item_types:
defn.type = Overloaded(item_types)
# Check override validity after we analyzed current definition.
if defn.info:
found_method_base_classes = self.check_method_override(defn)
if (
defn.is_explicit_override
and not found_method_base_classes
and found_method_base_classes is not None
):
self.msg.no_overridable_method(defn.name, defn)
self.check_explicit_override_decorator(defn, found_method_base_classes, defn.impl)
self.check_inplace_operator_method(defn)
def extract_callable_type(self, inner_type: Type | None, ctx: Context) -> CallableType | None:
"""Get type as seen by an overload item caller."""
inner_type = get_proper_type(inner_type)
outer_type: CallableType | None = None
if inner_type is not None and not isinstance(inner_type, AnyType):
if isinstance(inner_type, TypeVarLikeType):
inner_type = get_proper_type(inner_type.upper_bound)
if isinstance(inner_type, TypeType):
inner_type = get_proper_type(
self.expr_checker.analyze_type_type_callee(inner_type.item, ctx)
)
if isinstance(inner_type, CallableType):
outer_type = inner_type
elif isinstance(inner_type, Instance):
inner_call = get_proper_type(
analyze_member_access(
name="__call__",
typ=inner_type,
context=ctx,
is_lvalue=False,
is_super=False,
is_operator=True,
msg=self.msg,
original_type=inner_type,
chk=self,
)
)
if isinstance(inner_call, CallableType):
outer_type = inner_call
elif isinstance(inner_type, UnionType):
union_type = make_simplified_union(inner_type.items)
if isinstance(union_type, UnionType):
items = []
for item in union_type.items:
callable_item = self.extract_callable_type(item, ctx)
if callable_item is None:
break
items.append(callable_item)
else:
joined_type = get_proper_type(join.join_type_list(items))
if isinstance(joined_type, CallableType):
outer_type = joined_type
else:
return self.extract_callable_type(union_type, ctx)
if outer_type is None:
self.msg.not_callable(inner_type, ctx)
return outer_type
def check_overlapping_overloads(self, defn: OverloadedFuncDef) -> None:
# At this point we should have set the impl already, and all remaining
# items are decorators
if self.msg.errors.file in self.msg.errors.ignored_files or (
self.is_typeshed_stub and self.options.test_env
):
            # This is a little hacky; however, the quadratic check here is really expensive and
            # this method has no side effects, so we should skip it if we aren't going to report
            # anything. In some other places we swallow errors in stubs, but this error is very
            # useful for stubs!
return
# Compute some info about the implementation (if it exists) for use below
impl_type: CallableType | None = None
if defn.impl:
if isinstance(defn.impl, FuncDef):
inner_type: Type | None = defn.impl.type
elif isinstance(defn.impl, Decorator):
inner_type = defn.impl.var.type
else:
assert False, "Impl isn't the right type"
# This can happen if we've got an overload with a different
# decorator or if the implementation is untyped -- we gave up on the types.
impl_type = self.extract_callable_type(inner_type, defn.impl)
is_descriptor_get = defn.info and defn.name == "__get__"
for i, item in enumerate(defn.items):
assert isinstance(item, Decorator)
sig1 = self.extract_callable_type(item.var.type, item)
if sig1 is None:
continue
for j, item2 in enumerate(defn.items[i + 1 :]):
assert isinstance(item2, Decorator)
sig2 = self.extract_callable_type(item2.var.type, item2)
if sig2 is None:
continue
if not are_argument_counts_overlapping(sig1, sig2):
continue
if overload_can_never_match(sig1, sig2):
self.msg.overloaded_signature_will_never_match(i + 1, i + j + 2, item2.func)
elif not is_descriptor_get:
# Note: we force mypy to check overload signatures in strict-optional mode
# so we don't incorrectly report errors when a user tries typing an overload
# that happens to have a 'if the argument is None' fallback.
#
# For example, the following is fine in strict-optional mode but would throw
# the unsafe overlap error when strict-optional is disabled:
#
# @overload
# def foo(x: None) -> int: ...
# @overload
# def foo(x: str) -> str: ...
#
# See Python 2's map function for a concrete example of this kind of overload.
current_class = self.scope.active_class()
type_vars = current_class.defn.type_vars if current_class else []
with state.strict_optional_set(True):
if is_unsafe_overlapping_overload_signatures(sig1, sig2, type_vars):
flip_note = (
j == 0
and not is_unsafe_overlapping_overload_signatures(
sig2, sig1, type_vars
)
and not overload_can_never_match(sig2, sig1)
)
self.msg.overloaded_signatures_overlap(
i + 1, i + j + 2, flip_note, item.func
)
if impl_type is not None:
assert defn.impl is not None
# This is what we want from implementation, it should accept all arguments
# of an overload, but the return types should go the opposite way.
if is_callable_compatible(
impl_type,
sig1,
is_compat=is_subtype,
is_proper_subtype=False,
is_compat_return=lambda l, r: is_subtype(r, l),
):
continue
# If the above check didn't work, we repeat some key steps in
# is_callable_compatible() to give a better error message.
# We perform a unification step that's very similar to what
# 'is_callable_compatible' does -- the only difference is that
# we check and see if the impl_type's return value is a
# *supertype* of the overload alternative, not a *subtype*.
#
# This is to match the direction the implementation's return
# needs to be compatible in.
if impl_type.variables:
impl: CallableType | None = unify_generic_callable(
# Normalize both before unifying
impl_type.with_unpacked_kwargs(),
sig1.with_unpacked_kwargs(),
ignore_return=False,
return_constraint_direction=SUPERTYPE_OF,
)
if impl is None:
self.msg.overloaded_signatures_typevar_specific(i + 1, defn.impl)
continue
else:
impl = impl_type
# Prevent extra noise from inconsistent use of @classmethod by copying
# the first arg from the method being checked against.
if sig1.arg_types and defn.info:
impl = impl.copy_modified(arg_types=[sig1.arg_types[0]] + impl.arg_types[1:])
# Is the overload alternative's arguments subtypes of the implementation's?
if not is_callable_compatible(
impl, sig1, is_compat=is_subtype, is_proper_subtype=False, ignore_return=True
):
self.msg.overloaded_signatures_arg_specific(i + 1, defn.impl)
# Is the overload alternative's return type a subtype of the implementation's?
if not (
is_subtype(sig1.ret_type, impl.ret_type)
or is_subtype(impl.ret_type, sig1.ret_type)
):
self.msg.overloaded_signatures_ret_specific(i + 1, defn.impl)
# Here's the scoop about generators and coroutines.
#
# There are two kinds of generators: classic generators (functions
# with `yield` or `yield from` in the body) and coroutines
# (functions declared with `async def`). The latter are specified
# in PEP 492 and only available in Python >= 3.5.
#
# Classic generators can be parameterized with three types:
# - ty is the Yield type (the type of y in `yield y`)
# - tc is the type reCeived by yield (the type of c in `c = yield`).
# - tr is the Return type (the type of r in `return r`)
#
# A classic generator must define a return type that's either
# `Generator[ty, tc, tr]`, Iterator[ty], or Iterable[ty] (or
# object or Any). If tc/tr are not given, both are None.
#
# A coroutine must define a return type corresponding to tr; the
# other two are unconstrained. The "external" return type (seen
# by the caller) is Awaitable[tr].
#
# In addition, there's the synthetic type AwaitableGenerator: it
# inherits from both Awaitable and Generator and can be used both
# in `yield from` and in `await`. This type is set automatically
# for functions decorated with `@types.coroutine` or
# `@asyncio.coroutine`. Its single parameter corresponds to tr.
#
# PEP 525 adds a new type, the asynchronous generator, which was
# first released in Python 3.6. Async generators are `async def`
# functions that can also `yield` values. They can be parameterized
# with two types, ty and tc, because they cannot return a value.
#
# There are several useful methods, each taking a type t and a
# flag c indicating whether it's for a generator or coroutine:
#
# - is_generator_return_type(t, c) returns whether t is a Generator,
# Iterator, Iterable (if not c), or Awaitable (if c), or
# AwaitableGenerator (regardless of c).
# - is_async_generator_return_type(t) returns whether t is an
# AsyncGenerator.
# - get_generator_yield_type(t, c) returns ty.
# - get_generator_receive_type(t, c) returns tc.
# - get_generator_return_type(t, c) returns tr.
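    #
    # For example, for a classic generator declared as
    #
    #     def gen() -> Generator[int, str, bool]:
    #         received = yield 1   # ty is int, tc is str
    #         return True          # tr is bool
    #
    # get_generator_yield_type(t, False) returns int,
    # get_generator_receive_type(t, False) returns str, and
    # get_generator_return_type(t, False) returns bool.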
def is_generator_return_type(self, typ: Type, is_coroutine: bool) -> bool:
"""Is `typ` a valid type for a generator/coroutine?
True if `typ` is a *supertype* of Generator or Awaitable.
        Also true if it's *exactly* AwaitableGenerator (modulo type parameters).
"""
typ = get_proper_type(typ)
if is_coroutine:
# This means we're in Python 3.5 or later.
at = self.named_generic_type("typing.Awaitable", [AnyType(TypeOfAny.special_form)])
if is_subtype(at, typ):
return True
else:
any_type = AnyType(TypeOfAny.special_form)
gt = self.named_generic_type("typing.Generator", [any_type, any_type, any_type])
if is_subtype(gt, typ):
return True
return isinstance(typ, Instance) and typ.type.fullname == "typing.AwaitableGenerator"
def is_async_generator_return_type(self, typ: Type) -> bool:
"""Is `typ` a valid type for an async generator?
True if `typ` is a supertype of AsyncGenerator.
"""
try:
any_type = AnyType(TypeOfAny.special_form)
agt = self.named_generic_type("typing.AsyncGenerator", [any_type, any_type])
except KeyError:
# we're running on a version of typing that doesn't have AsyncGenerator yet
return False
return is_subtype(agt, typ)
def get_generator_yield_type(self, return_type: Type, is_coroutine: bool) -> Type:
"""Given the declared return type of a generator (t), return the type it yields (ty)."""
return_type = get_proper_type(return_type)
if isinstance(return_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=return_type)
elif isinstance(return_type, UnionType):
return make_simplified_union(
[self.get_generator_yield_type(item, is_coroutine) for item in return_type.items]
)
elif not self.is_generator_return_type(
return_type, is_coroutine
) and not self.is_async_generator_return_type(return_type):
# If the function doesn't have a proper Generator (or
# Awaitable) return type, anything is permissible.
return AnyType(TypeOfAny.from_error)
elif not isinstance(return_type, Instance):
# Same as above, but written as a separate branch so the typechecker can understand.
return AnyType(TypeOfAny.from_error)
elif return_type.type.fullname == "typing.Awaitable":
# Awaitable: ty is Any.
return AnyType(TypeOfAny.special_form)
elif return_type.args:
# AwaitableGenerator, Generator, AsyncGenerator, Iterator, or Iterable; ty is args[0].
ret_type = return_type.args[0]
# TODO not best fix, better have dedicated yield token
return ret_type
else:
# If the function's declared supertype of Generator has no type
# parameters (i.e. is `object`), then the yielded values can't
# be accessed so any type is acceptable. IOW, ty is Any.
# (However, see https://github.com/python/mypy/issues/1933)
return AnyType(TypeOfAny.special_form)
def get_generator_receive_type(self, return_type: Type, is_coroutine: bool) -> Type:
"""Given a declared generator return type (t), return the type its yield receives (tc)."""
return_type = get_proper_type(return_type)
if isinstance(return_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=return_type)
elif isinstance(return_type, UnionType):
return make_simplified_union(
[self.get_generator_receive_type(item, is_coroutine) for item in return_type.items]
)
elif not self.is_generator_return_type(
return_type, is_coroutine
) and not self.is_async_generator_return_type(return_type):
# If the function doesn't have a proper Generator (or
# Awaitable) return type, anything is permissible.
return AnyType(TypeOfAny.from_error)
elif not isinstance(return_type, Instance):
# Same as above, but written as a separate branch so the typechecker can understand.
return AnyType(TypeOfAny.from_error)
elif return_type.type.fullname == "typing.Awaitable":
# Awaitable, AwaitableGenerator: tc is Any.
return AnyType(TypeOfAny.special_form)
elif (
return_type.type.fullname in ("typing.Generator", "typing.AwaitableGenerator")
and len(return_type.args) >= 3
):
# Generator: tc is args[1].
return return_type.args[1]
elif return_type.type.fullname == "typing.AsyncGenerator" and len(return_type.args) >= 2:
return return_type.args[1]
else:
# `return_type` is a supertype of Generator, so callers won't be able to send it
# values. IOW, tc is None.
return NoneType()
def get_coroutine_return_type(self, return_type: Type) -> Type:
return_type = get_proper_type(return_type)
if isinstance(return_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=return_type)
assert isinstance(return_type, Instance), "Should only be called on coroutine functions."
# Note: return type is the 3rd type parameter of Coroutine.
return return_type.args[2]
def get_generator_return_type(self, return_type: Type, is_coroutine: bool) -> Type:
"""Given the declared return type of a generator (t), return the type it returns (tr)."""
return_type = get_proper_type(return_type)
if isinstance(return_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=return_type)
elif isinstance(return_type, UnionType):
return make_simplified_union(
[self.get_generator_return_type(item, is_coroutine) for item in return_type.items]
)
elif not self.is_generator_return_type(return_type, is_coroutine):
# If the function doesn't have a proper Generator (or
# Awaitable) return type, anything is permissible.
return AnyType(TypeOfAny.from_error)
elif not isinstance(return_type, Instance):
# Same as above, but written as a separate branch so the typechecker can understand.
return AnyType(TypeOfAny.from_error)
elif return_type.type.fullname == "typing.Awaitable" and len(return_type.args) == 1:
# Awaitable: tr is args[0].
return return_type.args[0]
elif (
return_type.type.fullname in ("typing.Generator", "typing.AwaitableGenerator")
and len(return_type.args) >= 3
):
# AwaitableGenerator, Generator: tr is args[2].
return return_type.args[2]
else:
# We have a supertype of Generator (Iterator, Iterable, object)
# Treat `Iterator[X]` as a shorthand for `Generator[X, Any, None]`.
return NoneType()
def visit_func_def(self, defn: FuncDef) -> None:
if not self.recurse_into_functions:
return
with self.tscope.function_scope(defn):
self._visit_func_def(defn)
def _visit_func_def(self, defn: FuncDef) -> None:
"""Type check a function definition."""
self.check_func_item(defn, name=defn.name)
if defn.info:
if not defn.is_overload and not defn.is_decorated:
# If the definition is the implementation for an
# overload, the legality of the override has already
# been typechecked, and decorated methods will be
# checked when the decorator is.
found_method_base_classes = self.check_method_override(defn)
self.check_explicit_override_decorator(defn, found_method_base_classes)
self.check_inplace_operator_method(defn)
if defn.original_def:
# Override previous definition.
new_type = self.function_type(defn)
if isinstance(defn.original_def, FuncDef):
# Function definition overrides function definition.
old_type = self.function_type(defn.original_def)
if not is_same_type(new_type, old_type):
self.msg.incompatible_conditional_function_def(defn, old_type, new_type)
else:
# Function definition overrides a variable initialized via assignment or a
# decorated function.
orig_type = defn.original_def.type
if orig_type is None:
# If other branch is unreachable, we don't type check it and so we might
# not have a type for the original definition
return
if isinstance(orig_type, PartialType):
if orig_type.type is None:
# Ah this is a partial type. Give it the type of the function.
orig_def = defn.original_def
if isinstance(orig_def, Decorator):
var = orig_def.var
else:
var = orig_def
partial_types = self.find_partial_types(var)
if partial_types is not None:
var.type = new_type
del partial_types[var]
else:
# Trying to redefine something like partial empty list as function.
self.fail(message_registry.INCOMPATIBLE_REDEFINITION, defn)
else:
name_expr = NameExpr(defn.name)
name_expr.node = defn.original_def
self.binder.assign_type(name_expr, new_type, orig_type)
self.check_subtype(
new_type,
orig_type,
defn,
message_registry.INCOMPATIBLE_REDEFINITION,
"redefinition with type",
"original type",
)
def check_func_item(
self,
defn: FuncItem,
type_override: CallableType | None = None,
name: str | None = None,
allow_empty: bool = False,
) -> None:
"""Type check a function.
If type_override is provided, use it as the function type.
"""
self.dynamic_funcs.append(defn.is_dynamic() and not type_override)
with self.enter_partial_types(is_function=True):
typ = self.function_type(defn)
if type_override:
typ = type_override.copy_modified(line=typ.line, column=typ.column)
if isinstance(typ, CallableType):
with self.enter_attribute_inference_context():
self.check_func_def(defn, typ, name, allow_empty)
else:
raise RuntimeError("Not supported")
self.dynamic_funcs.pop()
self.current_node_deferred = False
if name == "__exit__":
self.check__exit__return_type(defn)
# TODO: the following logic should move to the dataclasses plugin
# https://github.com/python/mypy/issues/15515
if name == "__post_init__":
if dataclasses_plugin.is_processed_dataclass(defn.info):
dataclasses_plugin.check_post_init(self, defn, defn.info)
@contextmanager
def enter_attribute_inference_context(self) -> Iterator[None]:
old_types = self.inferred_attribute_types
self.inferred_attribute_types = {}
yield None
self.inferred_attribute_types = old_types
def check_func_def(
self, defn: FuncItem, typ: CallableType, name: str | None, allow_empty: bool = False
) -> None:
"""Type check a function definition."""
# Expand type variables with value restrictions to ordinary types.
expanded = self.expand_typevars(defn, typ)
original_typ = typ
for item, typ in expanded:
old_binder = self.binder
self.binder = ConditionalTypeBinder()
with self.binder.top_frame_context():
defn.expanded.append(item)
# We may be checking a function definition or an anonymous
# function. In the first case, set up another reference with the
# precise type.
if isinstance(item, FuncDef):
fdef = item
# Check if __init__ has an invalid return type.
if (
fdef.info
and fdef.name in ("__init__", "__init_subclass__")
and not isinstance(
get_proper_type(typ.ret_type), (NoneType, UninhabitedType)
)
and not self.dynamic_funcs[-1]
):
self.fail(
message_registry.MUST_HAVE_NONE_RETURN_TYPE.format(fdef.name), item
)
# Check validity of __new__ signature
if fdef.info and fdef.name == "__new__":
self.check___new___signature(fdef, typ)
self.check_for_missing_annotations(fdef)
if self.options.disallow_any_unimported:
if fdef.type and isinstance(fdef.type, CallableType):
ret_type = fdef.type.ret_type
if has_any_from_unimported_type(ret_type):
self.msg.unimported_type_becomes_any("Return type", ret_type, fdef)
for idx, arg_type in enumerate(fdef.type.arg_types):
if has_any_from_unimported_type(arg_type):
prefix = f'Argument {idx + 1} to "{fdef.name}"'
self.msg.unimported_type_becomes_any(prefix, arg_type, fdef)
check_for_explicit_any(
fdef.type, self.options, self.is_typeshed_stub, self.msg, context=fdef
)
if name: # Special method names
if defn.info and self.is_reverse_op_method(name):
self.check_reverse_op_method(item, typ, name, defn)
elif name in ("__getattr__", "__getattribute__"):
self.check_getattr_method(typ, defn, name)
elif name == "__setattr__":
self.check_setattr_method(typ, defn)
# Refuse contravariant return type variable
if isinstance(typ.ret_type, TypeVarType):
if typ.ret_type.variance == CONTRAVARIANT:
self.fail(
message_registry.RETURN_TYPE_CANNOT_BE_CONTRAVARIANT, typ.ret_type
)
self.check_unbound_return_typevar(typ)
elif (
isinstance(original_typ.ret_type, TypeVarType) and original_typ.ret_type.values
):
# Since type vars with values are expanded, the return type is changed
# to a raw value. This is a hack to get it back.
self.check_unbound_return_typevar(original_typ)
# Check that Generator functions have the appropriate return type.
if defn.is_generator:
if defn.is_async_generator:
if not self.is_async_generator_return_type(typ.ret_type):
self.fail(
message_registry.INVALID_RETURN_TYPE_FOR_ASYNC_GENERATOR, typ
)
else:
if not self.is_generator_return_type(typ.ret_type, defn.is_coroutine):
self.fail(message_registry.INVALID_RETURN_TYPE_FOR_GENERATOR, typ)
# Fix the type if decorated with `@types.coroutine` or `@asyncio.coroutine`.
if defn.is_awaitable_coroutine:
# Update the return type to AwaitableGenerator.
# (This doesn't exist in typing.py, only in typing.pyi.)
t = typ.ret_type
c = defn.is_coroutine
ty = self.get_generator_yield_type(t, c)
tc = self.get_generator_receive_type(t, c)
if c:
tr = self.get_coroutine_return_type(t)
else:
tr = self.get_generator_return_type(t, c)
ret_type = self.named_generic_type(
"typing.AwaitableGenerator", [ty, tc, tr, t]
)
typ = typ.copy_modified(ret_type=ret_type)
defn.type = typ
# Push return type.
self.return_types.append(typ.ret_type)
with self.scope.push_function(defn):
# We temporary push the definition to get the self type as
# visible from *inside* of this function/method.
ref_type: Type | None = self.scope.active_self_type()
if typ.type_is:
arg_index = 0
# For methods and classmethods, we want the second parameter
if ref_type is not None and (not defn.is_static or defn.name == "__new__"):
arg_index = 1
if arg_index < len(typ.arg_types) and not is_subtype(
typ.type_is, typ.arg_types[arg_index]
):
self.fail(
message_registry.NARROWED_TYPE_NOT_SUBTYPE.format(
format_type(typ.type_is, self.options),
format_type(typ.arg_types[arg_index], self.options),
),
item,
)
# Store argument types.
for i in range(len(typ.arg_types)):
arg_type = typ.arg_types[i]
if (
isinstance(defn, FuncDef)
and ref_type is not None
and i == 0
and (not defn.is_static or defn.name == "__new__")
and typ.arg_kinds[0] not in [nodes.ARG_STAR, nodes.ARG_STAR2]
):
if defn.is_class or defn.name == "__new__":
ref_type = mypy.types.TypeType.make_normalized(ref_type)
if not is_same_type(arg_type, ref_type):
# This level of erasure matches the one in checkmember.check_self_arg(),
# better keep these two checks consistent.
erased = get_proper_type(erase_typevars(erase_to_bound(arg_type)))
if not is_subtype(ref_type, erased, ignore_type_params=True):
if (
isinstance(erased, Instance)
and erased.type.is_protocol
or isinstance(erased, TypeType)
and isinstance(erased.item, Instance)
and erased.item.type.is_protocol
):
# We allow the explicit self-type to be not a supertype of
# the current class if it is a protocol. For such cases
# the consistency check will be performed at call sites.
msg = None
elif typ.arg_names[i] in {"self", "cls"}:
msg = message_registry.ERASED_SELF_TYPE_NOT_SUPERTYPE.format(
erased.str_with_options(self.options),
ref_type.str_with_options(self.options),
)
else:
msg = message_registry.MISSING_OR_INVALID_SELF_TYPE
if msg:
self.fail(msg, defn)
elif isinstance(arg_type, TypeVarType):
# Refuse covariant parameter type variables
# TODO: check recursively for inner type variables
if (
arg_type.variance == COVARIANT
and defn.name not in ("__init__", "__new__", "__post_init__")
and not is_private(defn.name) # private methods are not inherited
):
ctx: Context = arg_type
if ctx.line < 0:
ctx = typ
self.fail(message_registry.FUNCTION_PARAMETER_CANNOT_BE_COVARIANT, ctx)
# Need to store arguments again for the expanded item.
store_argument_type(item, i, typ, self.named_generic_type)
# Type check initialization expressions.
body_is_trivial = is_trivial_body(defn.body)
self.check_default_args(item, body_is_trivial)
# Type check body in a new scope.
with self.binder.top_frame_context():
# Copy some type narrowings from an outer function when it seems safe enough
# (i.e. we can't find an assignment that might change the type of the
# variable afterwards).
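                # For example, the narrowing of `x` below can be propagated into
                # `nested` because `x` is not assigned again after `nested` is
                # defined:
                #
                #     def f(x: int | None) -> None:
                #         if x is not None:
                #             def nested() -> None:
                #                 reveal_type(x)  # narrowed to "int" here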
new_frame: Frame | None = None
for frame in old_binder.frames:
for key, narrowed_type in frame.types.items():
key_var = extract_var_from_literal_hash(key)
if key_var is not None and not self.is_var_redefined_in_outer_context(
key_var, defn.line
):
# It seems safe to propagate the type narrowing to a nested scope.
if new_frame is None:
new_frame = self.binder.push_frame()
new_frame.types[key] = narrowed_type
self.binder.declarations[key] = old_binder.declarations[key]
with self.scope.push_function(defn):
# We suppress reachability warnings for empty generator functions
# (return; yield) which have a "yield" that's unreachable by definition
# since it's only there to promote the function into a generator function.
#
# We also suppress reachability warnings when we use TypeVars with value
# restrictions: we only want to report a warning if a certain statement is
# marked as being suppressed in *all* of the expansions, but we currently
# have no good way of doing this.
#
# TODO: Find a way of working around this limitation
if _is_empty_generator_function(item) or len(expanded) >= 2:
self.binder.suppress_unreachable_warnings()
self.accept(item.body)
unreachable = self.binder.is_unreachable()
if new_frame is not None:
self.binder.pop_frame(True, 0)
if not unreachable:
if defn.is_generator or is_named_instance(
self.return_types[-1], "typing.AwaitableGenerator"
):
return_type = self.get_generator_return_type(
self.return_types[-1], defn.is_coroutine
)
elif defn.is_coroutine:
return_type = self.get_coroutine_return_type(self.return_types[-1])
else:
return_type = self.return_types[-1]
return_type = get_proper_type(return_type)
allow_empty = allow_empty or self.options.allow_empty_bodies
show_error = (
not body_is_trivial
or
# Allow empty bodies for abstract methods, overloads, in tests and stubs.
(
not allow_empty
and not (
isinstance(defn, FuncDef) and defn.abstract_status != NOT_ABSTRACT
)
and not self.is_stub
)
)
# Ignore plugin generated methods, these usually don't need any bodies.
if defn.info is not FUNC_NO_INFO and (
defn.name not in defn.info.names or defn.info.names[defn.name].plugin_generated
):
show_error = False
# Ignore also definitions that appear in `if TYPE_CHECKING: ...` blocks.
# These can't be called at runtime anyway (similar to plugin-generated).
if isinstance(defn, FuncDef) and defn.is_mypy_only:
show_error = False
# We want to minimize the fallout from checking empty bodies
# that was absent in many mypy versions.
if body_is_trivial and is_subtype(NoneType(), return_type):
show_error = False
may_be_abstract = (
body_is_trivial
and defn.info is not FUNC_NO_INFO
and defn.info.metaclass_type is not None
and defn.info.metaclass_type.type.has_base("abc.ABCMeta")
)
if self.options.warn_no_return:
if (
not self.current_node_deferred
and not isinstance(return_type, (NoneType, AnyType))
and show_error
):
# Control flow fell off the end of a function that was
# declared to return a non-None type.
if isinstance(return_type, UninhabitedType):
# This is a NoReturn function
msg = message_registry.INVALID_IMPLICIT_RETURN
else:
msg = message_registry.MISSING_RETURN_STATEMENT
if body_is_trivial:
msg = msg._replace(code=codes.EMPTY_BODY)
self.fail(msg, defn)
if may_be_abstract:
self.note(message_registry.EMPTY_BODY_ABSTRACT, defn)
elif show_error:
msg = message_registry.INCOMPATIBLE_RETURN_VALUE_TYPE
if body_is_trivial:
msg = msg._replace(code=codes.EMPTY_BODY)
# similar to code in check_return_stmt
if (
not self.check_subtype(
subtype_label="implicitly returns",
subtype=NoneType(),
supertype_label="expected",
supertype=return_type,
context=defn,
msg=msg,
)
and may_be_abstract
):
self.note(message_registry.EMPTY_BODY_ABSTRACT, defn)
self.return_types.pop()
self.binder = old_binder
def is_var_redefined_in_outer_context(self, v: Var, after_line: int) -> bool:
"""Can the variable be assigned to at module top level or outer function?
Note that this doesn't do a full CFG analysis but uses a line number based
heuristic that isn't correct in some (rare) cases.
"""
outers = self.tscope.outer_functions()
if not outers:
# Top-level function -- outer context is top level, and we can't reason about
# globals
return True
for outer in outers:
if isinstance(outer, FuncDef):
if find_last_var_assignment_line(outer.body, v) >= after_line:
return True
return False
def check_unbound_return_typevar(self, typ: CallableType) -> None:
"""Fails when the return typevar is not defined in arguments."""
if isinstance(typ.ret_type, TypeVarType) and typ.ret_type in typ.variables:
arg_type_visitor = CollectArgTypeVarTypes()
for argtype in typ.arg_types:
argtype.accept(arg_type_visitor)
if typ.ret_type not in arg_type_visitor.arg_types:
self.fail(message_registry.UNBOUND_TYPEVAR, typ.ret_type, code=TYPE_VAR)
upper_bound = get_proper_type(typ.ret_type.upper_bound)
if not (
isinstance(upper_bound, Instance)
and upper_bound.type.fullname == "builtins.object"
):
self.note(
"Consider using the upper bound "
f"{format_type(typ.ret_type.upper_bound, self.options)} instead",
context=typ.ret_type,
)
def check_default_args(self, item: FuncItem, body_is_trivial: bool) -> None:
for arg in item.arguments:
if arg.initializer is None:
continue
if body_is_trivial and isinstance(arg.initializer, EllipsisExpr):
continue
name = arg.variable.name
msg = "Incompatible default for "
if name.startswith("__tuple_arg_"):
msg += f"tuple argument {name[12:]}"
else:
msg += f'argument "{name}"'
if (
not self.options.implicit_optional
and isinstance(arg.initializer, NameExpr)
and arg.initializer.fullname == "builtins.None"
):
notes = [
"PEP 484 prohibits implicit Optional. "
"Accordingly, mypy has changed its default to no_implicit_optional=True",
"Use https://github.com/hauntsaninja/no_implicit_optional to automatically "
"upgrade your codebase",
]
else:
notes = None
self.check_simple_assignment(
arg.variable.type,
arg.initializer,
context=arg.initializer,
msg=ErrorMessage(msg, code=codes.ASSIGNMENT),
lvalue_name="argument",
rvalue_name="default",
notes=notes,
)
def is_forward_op_method(self, method_name: str) -> bool:
return method_name in operators.reverse_op_methods
def is_reverse_op_method(self, method_name: str) -> bool:
return method_name in operators.reverse_op_method_set
def check_for_missing_annotations(self, fdef: FuncItem) -> None:
# Check for functions with unspecified/not fully specified types.
def is_unannotated_any(t: Type) -> bool:
if not isinstance(t, ProperType):
return False
return isinstance(t, AnyType) and t.type_of_any == TypeOfAny.unannotated
has_explicit_annotation = isinstance(fdef.type, CallableType) and any(
not is_unannotated_any(t) for t in fdef.type.arg_types + [fdef.type.ret_type]
)
show_untyped = not self.is_typeshed_stub or self.options.warn_incomplete_stub
check_incomplete_defs = self.options.disallow_incomplete_defs and has_explicit_annotation
if show_untyped and (self.options.disallow_untyped_defs or check_incomplete_defs):
if fdef.type is None and self.options.disallow_untyped_defs:
if not fdef.arguments or (
len(fdef.arguments) == 1
and (fdef.arg_names[0] == "self" or fdef.arg_names[0] == "cls")
):
self.fail(message_registry.RETURN_TYPE_EXPECTED, fdef)
if not has_return_statement(fdef) and not fdef.is_generator:
self.note(
'Use "-> None" if function does not return a value',
fdef,
code=codes.NO_UNTYPED_DEF,
)
else:
self.fail(message_registry.FUNCTION_TYPE_EXPECTED, fdef)
elif isinstance(fdef.type, CallableType):
ret_type = get_proper_type(fdef.type.ret_type)
if is_unannotated_any(ret_type):
self.fail(message_registry.RETURN_TYPE_EXPECTED, fdef)
elif fdef.is_generator:
if is_unannotated_any(
self.get_generator_return_type(ret_type, fdef.is_coroutine)
):
self.fail(message_registry.RETURN_TYPE_EXPECTED, fdef)
elif fdef.is_coroutine and isinstance(ret_type, Instance):
if is_unannotated_any(self.get_coroutine_return_type(ret_type)):
self.fail(message_registry.RETURN_TYPE_EXPECTED, fdef)
if any(is_unannotated_any(t) for t in fdef.type.arg_types):
self.fail(message_registry.ARGUMENT_TYPE_EXPECTED, fdef)
def check___new___signature(self, fdef: FuncDef, typ: CallableType) -> None:
self_type = fill_typevars_with_any(fdef.info)
bound_type = bind_self(typ, self_type, is_classmethod=True)
# Check that __new__ (after binding cls) returns an instance
# type (or any).
if fdef.info.is_metaclass():
# This is a metaclass, so it must return a new unrelated type.
self.check_subtype(
bound_type.ret_type,
self.type_type(),
fdef,
message_registry.INVALID_NEW_TYPE,
"returns",
"but must return a subtype of",
)
elif not isinstance(
get_proper_type(bound_type.ret_type), (AnyType, Instance, TupleType, UninhabitedType)
):
self.fail(
message_registry.NON_INSTANCE_NEW_TYPE.format(
format_type(bound_type.ret_type, self.options)
),
fdef,
)
else:
# And that it returns a subtype of the class
self.check_subtype(
bound_type.ret_type,
self_type,
fdef,
message_registry.INVALID_NEW_TYPE,
"returns",
"but must return a subtype of",
)
def check_reverse_op_method(
self, defn: FuncItem, reverse_type: CallableType, reverse_name: str, context: Context
) -> None:
"""Check a reverse operator method such as __radd__."""
        # This block used to check for some very obscure scenario; now it just
        # decides whether it's worth calling check_overlapping_op_methods().
assert defn.info
# First check for a valid signature
method_type = CallableType(
[AnyType(TypeOfAny.special_form), AnyType(TypeOfAny.special_form)],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
AnyType(TypeOfAny.special_form),
self.named_type("builtins.function"),
)
if not is_subtype(reverse_type, method_type):
self.msg.invalid_signature(reverse_type, context)
return
if reverse_name in ("__eq__", "__ne__"):
# These are defined for all objects => can't cause trouble.
return
# With 'Any' or 'object' return type we are happy, since any possible
# return value is valid.
ret_type = get_proper_type(reverse_type.ret_type)
if isinstance(ret_type, AnyType):
return
if isinstance(ret_type, Instance):
if ret_type.type.fullname == "builtins.object":
return
if reverse_type.arg_kinds[0] == ARG_STAR:
reverse_type = reverse_type.copy_modified(
arg_types=[reverse_type.arg_types[0]] * 2,
arg_kinds=[ARG_POS] * 2,
arg_names=[reverse_type.arg_names[0], "_"],
)
assert len(reverse_type.arg_types) >= 2
forward_name = operators.normal_from_reverse_op[reverse_name]
forward_inst = get_proper_type(reverse_type.arg_types[1])
if isinstance(forward_inst, TypeVarType):
forward_inst = get_proper_type(forward_inst.upper_bound)
elif isinstance(forward_inst, TupleType):
forward_inst = tuple_fallback(forward_inst)
elif isinstance(forward_inst, (FunctionLike, TypedDictType, LiteralType)):
forward_inst = forward_inst.fallback
if isinstance(forward_inst, TypeType):
item = forward_inst.item
if isinstance(item, Instance):
opt_meta = item.type.metaclass_type
if opt_meta is not None:
forward_inst = opt_meta
def has_readable_member(typ: UnionType | Instance, name: str) -> bool:
# TODO: Deal with attributes of TupleType etc.
if isinstance(typ, Instance):
return typ.type.has_readable_member(name)
return all(
(isinstance(x, UnionType) and has_readable_member(x, name))
or (isinstance(x, Instance) and x.type.has_readable_member(name))
for x in get_proper_types(typ.relevant_items())
)
if not (
isinstance(forward_inst, (Instance, UnionType))
and has_readable_member(forward_inst, forward_name)
):
return
forward_base = reverse_type.arg_types[1]
forward_type = self.expr_checker.analyze_external_member_access(
forward_name, forward_base, context=defn
)
self.check_overlapping_op_methods(
reverse_type,
reverse_name,
defn.info,
forward_type,
forward_name,
forward_base,
context=defn,
)
def check_overlapping_op_methods(
self,
reverse_type: CallableType,
reverse_name: str,
reverse_class: TypeInfo,
forward_type: Type,
forward_name: str,
forward_base: Type,
context: Context,
) -> None:
"""Check for overlapping method and reverse method signatures.
This function assumes that:
- The reverse method has valid argument count and kinds.
- If the reverse operator method accepts some argument of type
          X, the forward operator method also belongs to class X.
For example, if we have the reverse operator `A.__radd__(B)`, then the
corresponding forward operator must have the type `B.__add__(...)`.
"""
# Note: Suppose we have two operator methods "A.__rOP__(B) -> R1" and
# "B.__OP__(C) -> R2". We check if these two methods are unsafely overlapping
# by using the following algorithm:
#
# 1. Rewrite "B.__OP__(C) -> R1" to "temp1(B, C) -> R1"
#
# 2. Rewrite "A.__rOP__(B) -> R2" to "temp2(B, A) -> R2"
#
# 3. Treat temp1 and temp2 as if they were both variants in the same
# overloaded function. (This mirrors how the Python runtime calls
# operator methods: we first try __OP__, then __rOP__.)
#
# If the first signature is unsafely overlapping with the second,
# report an error.
#
# 4. However, if temp1 shadows temp2 (e.g. the __rOP__ method can never
# be called), do NOT report an error.
#
# This behavior deviates from how we handle overloads -- many of the
# modules in typeshed seem to define __OP__ methods that shadow the
# corresponding __rOP__ method.
#
# Note: we do not attempt to handle unsafe overlaps related to multiple
# inheritance. (This is consistent with how we handle overloads: we also
# do not try checking unsafe overlaps due to multiple inheritance there.)
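        #
        # For example, given "A.__radd__(self: A, other: B) -> int" and
        # "B.__add__(self: B, other: C) -> str", step 1 above produces
        # "temp1(B, C) -> str" and step 2 produces "temp2(B, A) -> int";
        # these are then compared as if they were variants of one overload.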
for forward_item in flatten_nested_unions([forward_type]):
forward_item = get_proper_type(forward_item)
if isinstance(forward_item, CallableType):
if self.is_unsafe_overlapping_op(forward_item, forward_base, reverse_type):
self.msg.operator_method_signatures_overlap(
reverse_class, reverse_name, forward_base, forward_name, context
)
elif isinstance(forward_item, Overloaded):
for item in forward_item.items:
if self.is_unsafe_overlapping_op(item, forward_base, reverse_type):
self.msg.operator_method_signatures_overlap(
reverse_class, reverse_name, forward_base, forward_name, context
)
elif not isinstance(forward_item, AnyType):
self.msg.forward_operator_not_callable(forward_name, context)
def is_unsafe_overlapping_op(
self, forward_item: CallableType, forward_base: Type, reverse_type: CallableType
) -> bool:
# TODO: check argument kinds?
if len(forward_item.arg_types) < 1:
# Not a valid operator method -- can't succeed anyway.
return False
# Erase the type if necessary to make sure we don't have a single
# TypeVar in forward_tweaked. (Having a function signature containing
# just a single TypeVar can lead to unpredictable behavior.)
forward_base_erased = forward_base
if isinstance(forward_base, TypeVarType):
forward_base_erased = erase_to_bound(forward_base)
# Construct normalized function signatures corresponding to the
# operator methods. The first argument is the left operand and the
# second operand is the right argument -- we switch the order of
# the arguments of the reverse method.
# TODO: this manipulation is dangerous if callables are generic.
# Shuffling arguments between callables can create meaningless types.
forward_tweaked = forward_item.copy_modified(
arg_types=[forward_base_erased, forward_item.arg_types[0]],
arg_kinds=[nodes.ARG_POS] * 2,
arg_names=[None] * 2,
)
reverse_tweaked = reverse_type.copy_modified(
arg_types=[reverse_type.arg_types[1], reverse_type.arg_types[0]],
arg_kinds=[nodes.ARG_POS] * 2,
arg_names=[None] * 2,
)
reverse_base_erased = reverse_type.arg_types[0]
if isinstance(reverse_base_erased, TypeVarType):
reverse_base_erased = erase_to_bound(reverse_base_erased)
if is_same_type(reverse_base_erased, forward_base_erased):
return False
elif is_subtype(reverse_base_erased, forward_base_erased):
first = reverse_tweaked
second = forward_tweaked
else:
first = forward_tweaked
second = reverse_tweaked
current_class = self.scope.active_class()
type_vars = current_class.defn.type_vars if current_class else []
return is_unsafe_overlapping_overload_signatures(
first, second, type_vars, partial_only=False
)
def check_inplace_operator_method(self, defn: FuncBase) -> None:
"""Check an inplace operator method such as __iadd__.
They cannot arbitrarily overlap with __add__.
"""
method = defn.name
if method not in operators.inplace_operator_methods:
return
typ = bind_self(self.function_type(defn))
cls = defn.info
other_method = "__" + method[3:]
if cls.has_readable_member(other_method):
instance = fill_typevars(cls)
typ2 = get_proper_type(
self.expr_checker.analyze_external_member_access(other_method, instance, defn)
)
fail = False
if isinstance(typ2, FunctionLike):
if not is_more_general_arg_prefix(typ, typ2):
fail = True
else:
# TODO overloads
fail = True
if fail:
self.msg.signatures_incompatible(method, other_method, defn)
def check_getattr_method(self, typ: Type, context: Context, name: str) -> None:
if len(self.scope.stack) == 1:
# module scope
if name == "__getattribute__":
self.fail(message_registry.MODULE_LEVEL_GETATTRIBUTE, context)
return
# __getattr__ is fine at the module level as of Python 3.7 (PEP 562). We could
# show an error for Python < 3.7, but that would be annoying in code that supports
# both 3.7 and older versions.
method_type = CallableType(
[self.named_type("builtins.str")],
[nodes.ARG_POS],
[None],
AnyType(TypeOfAny.special_form),
self.named_type("builtins.function"),
)
elif self.scope.active_class():
method_type = CallableType(
[AnyType(TypeOfAny.special_form), self.named_type("builtins.str")],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
AnyType(TypeOfAny.special_form),
self.named_type("builtins.function"),
)
else:
return
if not is_subtype(typ, method_type):
self.msg.invalid_signature_for_special_method(typ, context, name)
def check_setattr_method(self, typ: Type, context: Context) -> None:
if not self.scope.active_class():
return
method_type = CallableType(
[
AnyType(TypeOfAny.special_form),
self.named_type("builtins.str"),
AnyType(TypeOfAny.special_form),
],
[nodes.ARG_POS, nodes.ARG_POS, nodes.ARG_POS],
[None, None, None],
NoneType(),
self.named_type("builtins.function"),
)
if not is_subtype(typ, method_type):
self.msg.invalid_signature_for_special_method(typ, context, "__setattr__")
def check_slots_definition(self, typ: Type, context: Context) -> None:
"""Check the type of __slots__."""
str_type = self.named_type("builtins.str")
expected_type = UnionType(
[str_type, self.named_generic_type("typing.Iterable", [str_type])]
)
self.check_subtype(
typ,
expected_type,
context,
message_registry.INVALID_TYPE_FOR_SLOTS,
"actual type",
"expected type",
code=codes.ASSIGNMENT,
)
def check_match_args(self, var: Var, typ: Type, context: Context) -> None:
"""Check that __match_args__ contains literal strings"""
if not self.scope.active_class():
return
typ = get_proper_type(typ)
if not isinstance(typ, TupleType) or not all(
is_string_literal(item) for item in typ.items
):
self.msg.note(
"__match_args__ must be a tuple containing string literals for checking "
"of match statements to work",
context,
code=codes.LITERAL_REQ,
)
def expand_typevars(
self, defn: FuncItem, typ: CallableType
) -> list[tuple[FuncItem, CallableType]]:
# TODO use generator
subst: list[list[tuple[TypeVarId, Type]]] = []
tvars = list(typ.variables) or []
if defn.info:
# Class type variables
tvars += defn.info.defn.type_vars or []
for tvar in tvars:
if isinstance(tvar, TypeVarType) and tvar.values:
subst.append([(tvar.id, value) for value in tvar.values])
# Make a copy of the function to check for each combination of
# value restricted type variables. (Except when running mypyc,
# where we need one canonical version of the function.)
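        # For example, given "T = TypeVar('T', int, str)", a function
        # "def f(x: T) -> T" is expanded into two copies below: one with T
        # replaced by int and one with T replaced by str.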
if subst and not (self.options.mypyc or self.options.inspections):
result: list[tuple[FuncItem, CallableType]] = []
for substitutions in itertools.product(*subst):
mapping = dict(substitutions)
result.append((expand_func(defn, mapping), expand_type(typ, mapping)))
return result
else:
return [(defn, typ)]
def check_explicit_override_decorator(
self,
defn: FuncDef | OverloadedFuncDef,
found_method_base_classes: list[TypeInfo] | None,
context: Context | None = None,
) -> None:
plugin_generated = False
if defn.info and (node := defn.info.get(defn.name)) and node.plugin_generated:
# Do not report issues for plugin generated nodes,
# they can't realistically use `@override` for their methods.
plugin_generated = True
if (
not plugin_generated
and found_method_base_classes
and not defn.is_explicit_override
and defn.name not in ("__init__", "__new__")
and not is_private(defn.name)
):
self.msg.explicit_override_decorator_missing(
defn.name, found_method_base_classes[0].fullname, context or defn
)
def check_method_override(
self, defn: FuncDef | OverloadedFuncDef | Decorator
) -> list[TypeInfo] | None:
"""Check if function definition is compatible with base classes.
This may defer the method if a signature is not available in at least one base class.
Return ``None`` if that happens.
Return a list of base classes which contain an attribute with the method name.
"""
# Check against definitions in base classes.
check_override_compatibility = defn.name not in (
"__init__",
"__new__",
"__init_subclass__",
"__post_init__",
) and (self.options.check_untyped_defs or not defn.is_dynamic())
found_method_base_classes: list[TypeInfo] = []
for base in defn.info.mro[1:]:
result = self.check_method_or_accessor_override_for_base(
defn, base, check_override_compatibility
)
if result is None:
# Node was deferred, we will have another attempt later.
return None
if result:
found_method_base_classes.append(base)
return found_method_base_classes
def check_method_or_accessor_override_for_base(
self,
defn: FuncDef | OverloadedFuncDef | Decorator,
base: TypeInfo,
check_override_compatibility: bool,
) -> bool | None:
"""Check if method definition is compatible with a base class.
Return ``None`` if the node was deferred because one of the corresponding
superclass nodes is not ready.
Return ``True`` if an attribute with the method name was found in the base class.
"""
found_base_method = False
if base:
name = defn.name
base_attr = base.names.get(name)
if base_attr:
# First, check if we override a final (always an error, even with Any types).
if is_final_node(base_attr.node) and not is_private(name):
self.msg.cant_override_final(name, base.name, defn)
# Second, final can't override anything writeable independently of types.
if defn.is_final:
self.check_if_final_var_override_writable(name, base_attr.node, defn)
found_base_method = True
if check_override_compatibility:
# Check compatibility of the override signature
# (__init__, __new__, __init_subclass__ are special).
if self.check_method_override_for_base_with_name(defn, name, base):
return None
if name in operators.inplace_operator_methods:
# Figure out the name of the corresponding operator method.
method = "__" + name[3:]
# An inplace operator method such as __iadd__ might not be
# always introduced safely if a base class defined __add__.
# TODO can't come up with an example where this is
# necessary; now it's "just in case"
if self.check_method_override_for_base_with_name(defn, method, base):
return None
return found_base_method
def check_method_override_for_base_with_name(
self, defn: FuncDef | OverloadedFuncDef | Decorator, name: str, base: TypeInfo
) -> bool:
"""Check if overriding an attribute `name` of `base` with `defn` is valid.
Return True if the supertype node was not analysed yet, and `defn` was deferred.
"""
base_attr = base.names.get(name)
if base_attr:
# The name of the method is defined in the base class.
# Point errors at the 'def' line (important for backward compatibility
# of type ignores).
if not isinstance(defn, Decorator):
context = defn
else:
context = defn.func
# Construct the type of the overriding method.
# TODO: this logic is much less complete than similar one in checkmember.py
if isinstance(defn, (FuncDef, OverloadedFuncDef)):
typ: Type = self.function_type(defn)
override_class_or_static = defn.is_class or defn.is_static
override_class = defn.is_class
else:
assert defn.var.is_ready
assert defn.var.type is not None
typ = defn.var.type
override_class_or_static = defn.func.is_class or defn.func.is_static
override_class = defn.func.is_class
typ = get_proper_type(typ)
if isinstance(typ, FunctionLike) and not is_static(context):
typ = bind_self(typ, self.scope.active_self_type(), is_classmethod=override_class)
# Map the overridden method type to subtype context so that
# it can be checked for compatibility.
original_type = get_proper_type(base_attr.type)
original_node = base_attr.node
# `original_type` can be partial if (e.g.) it is originally an
# instance variable from an `__init__` block that becomes deferred.
if original_type is None or isinstance(original_type, PartialType):
if self.pass_num < self.last_pass:
# If there are passes left, defer this node until next pass,
# otherwise try reconstructing the method type from available information.
self.defer_node(defn, defn.info)
return True
elif isinstance(original_node, (FuncDef, OverloadedFuncDef)):
original_type = self.function_type(original_node)
elif isinstance(original_node, Decorator):
original_type = self.function_type(original_node.func)
elif isinstance(original_node, Var):
                # A supertype can define a method as a plain attribute.
                # See https://github.com/python/mypy/issues/10134
                # Note that `original_node.type` can also be None; this happens
                # for declarations such as `__hash__ = None`.
if original_node.type is not None:
original_type = get_proper_type(original_node.type)
else:
original_type = NoneType()
else:
# Will always fail to typecheck below, since we know the node is a method
original_type = NoneType()
if isinstance(original_node, (FuncDef, OverloadedFuncDef)):
original_class_or_static = original_node.is_class or original_node.is_static
elif isinstance(original_node, Decorator):
fdef = original_node.func
original_class_or_static = fdef.is_class or fdef.is_static
else:
original_class_or_static = False # a variable can't be class or static
if isinstance(original_type, FunctionLike):
original_type = self.bind_and_map_method(base_attr, original_type, defn.info, base)
if original_node and is_property(original_node):
original_type = get_property_type(original_type)
if is_property(defn):
inner: FunctionLike | None
if isinstance(typ, FunctionLike):
inner = typ
else:
inner = self.extract_callable_type(typ, context)
if inner is not None:
typ = inner
typ = get_property_type(typ)
if (
isinstance(original_node, Var)
and not original_node.is_final
and (not original_node.is_property or original_node.is_settable_property)
and isinstance(defn, Decorator)
):
# We only give an error where no other similar errors will be given.
if not isinstance(original_type, AnyType):
self.msg.fail(
"Cannot override writeable attribute with read-only property",
# Give an error on function line to match old behaviour.
defn.func,
code=codes.OVERRIDE,
)
if isinstance(original_type, AnyType) or isinstance(typ, AnyType):
pass
elif isinstance(original_type, FunctionLike) and isinstance(typ, FunctionLike):
# Check that the types are compatible.
self.check_override(
typ,
original_type,
defn.name,
name,
base.name,
original_class_or_static,
override_class_or_static,
context,
)
elif is_equivalent(original_type, typ):
# Assume invariance for a non-callable attribute here. Note
# that this doesn't affect read-only properties which can have
# covariant overrides.
pass
elif (
original_node
and not self.is_writable_attribute(original_node)
and is_subtype(typ, original_type)
):
# If the attribute is read-only, allow covariance
pass
else:
self.msg.signature_incompatible_with_supertype(
defn.name, name, base.name, context, original=original_type, override=typ
)
return False
def bind_and_map_method(
self, sym: SymbolTableNode, typ: FunctionLike, sub_info: TypeInfo, super_info: TypeInfo
) -> FunctionLike:
"""Bind self-type and map type variables for a method.
Arguments:
sym: a symbol that points to method definition
typ: method type on the definition
sub_info: class where the method is used
super_info: class where the method was defined
"""
if isinstance(sym.node, (FuncDef, OverloadedFuncDef, Decorator)) and not is_static(
sym.node
):
if isinstance(sym.node, Decorator):
is_class_method = sym.node.func.is_class
else:
is_class_method = sym.node.is_class
mapped_typ = cast(FunctionLike, map_type_from_supertype(typ, sub_info, super_info))
active_self_type = self.scope.active_self_type()
if isinstance(mapped_typ, Overloaded) and active_self_type:
# If we have an overload, filter to overloads that match the self type.
# This avoids false positives for concrete subclasses of generic classes,
# see testSelfTypeOverrideCompatibility for an example.
filtered_items = []
for item in mapped_typ.items:
if not item.arg_types:
filtered_items.append(item)
item_arg = item.arg_types[0]
if isinstance(item_arg, TypeVarType):
item_arg = item_arg.upper_bound
if is_subtype(active_self_type, item_arg):
filtered_items.append(item)
# If we don't have any filtered_items, maybe it's always a valid override
# of the superclass? However if you get to that point you're in murky type
# territory anyway, so we just preserve the type and have the behaviour match
# that of older versions of mypy.
if filtered_items:
mapped_typ = Overloaded(filtered_items)
return bind_self(mapped_typ, active_self_type, is_class_method)
else:
return cast(FunctionLike, map_type_from_supertype(typ, sub_info, super_info))
def get_op_other_domain(self, tp: FunctionLike) -> Type | None:
if isinstance(tp, CallableType):
if tp.arg_kinds and tp.arg_kinds[0] == ARG_POS:
# For generic methods, domain comparison is tricky, as a first
# approximation erase all remaining type variables.
return erase_typevars(tp.arg_types[0], {v.id for v in tp.variables})
return None
elif isinstance(tp, Overloaded):
raw_items = [self.get_op_other_domain(it) for it in tp.items]
items = [it for it in raw_items if it]
if items:
return make_simplified_union(items)
return None
else:
assert False, "Need to check all FunctionLike subtypes here"
def check_override(
self,
override: FunctionLike,
original: FunctionLike,
name: str,
name_in_super: str,
supertype: str,
original_class_or_static: bool,
override_class_or_static: bool,
node: Context,
) -> None:
"""Check a method override with given signatures.
Arguments:
override: The signature of the overriding method.
original: The signature of the original supertype method.
name: The name of the overriding method.
Used primarily for generating error messages.
name_in_super: The name of the overridden in the superclass.
Used for generating error messages only.
supertype: The name of the supertype.
original_class_or_static: Indicates whether the original method (from the superclass)
is either a class method or a static method.
override_class_or_static: Indicates whether the overriding method (from the subclass)
is either a class method or a static method.
node: Context node.
"""
# Use boolean variable to clarify code.
fail = False
op_method_wider_note = False
if not is_subtype(override, original, ignore_pos_arg_names=True):
fail = True
elif isinstance(override, Overloaded) and self.is_forward_op_method(name):
# Operator method overrides cannot extend the domain, as
# this could be unsafe with reverse operator methods.
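            # For example, if a base class overloads __add__ for "int" and "str"
            # operands, an override that adds a third variant accepting "bytes"
            # widens the domain and is rejected below, even though it would
            # otherwise be a valid subtype of the original signature.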
original_domain = self.get_op_other_domain(original)
override_domain = self.get_op_other_domain(override)
if (
original_domain
and override_domain
and not is_subtype(override_domain, original_domain)
):
fail = True
op_method_wider_note = True
if isinstance(override, FunctionLike):
if original_class_or_static and not override_class_or_static:
fail = True
elif isinstance(original, CallableType) and isinstance(override, CallableType):
if original.type_guard is not None and override.type_guard is None:
fail = True
if original.type_is is not None and override.type_is is None:
fail = True
if is_private(name):
fail = False
if fail:
emitted_msg = False
offset_arguments = isinstance(override, CallableType) and override.unpack_kwargs
# Normalize signatures, so we get better diagnostics.
if isinstance(override, (CallableType, Overloaded)):
override = override.with_unpacked_kwargs()
if isinstance(original, (CallableType, Overloaded)):
original = original.with_unpacked_kwargs()
if (
isinstance(override, CallableType)
and isinstance(original, CallableType)
and len(override.arg_types) == len(original.arg_types)
and override.min_args == original.min_args
):
# Give more detailed messages for the common case of both
# signatures having the same number of arguments and no
# overloads.
# override might have its own generic function type
# variables. If an argument or return type of override
# does not have the correct subtyping relationship
# with the original type even after these variables
# are erased, then it is definitely an incompatibility.
override_ids = override.type_var_ids()
type_name = None
if isinstance(override.definition, FuncDef):
type_name = override.definition.info.name
def erase_override(t: Type) -> Type:
return erase_typevars(t, ids_to_erase=override_ids)
for i, (sub_kind, super_kind) in enumerate(
zip(override.arg_kinds, original.arg_kinds)
):
if sub_kind.is_positional() and super_kind.is_positional():
override_arg_type = override.arg_types[i]
original_arg_type = original.arg_types[i]
elif sub_kind.is_named() and super_kind.is_named() and not offset_arguments:
arg_name = override.arg_names[i]
if arg_name in original.arg_names:
override_arg_type = override.arg_types[i]
original_i = original.arg_names.index(arg_name)
original_arg_type = original.arg_types[original_i]
else:
continue
else:
continue
if not is_subtype(original_arg_type, erase_override(override_arg_type)):
if isinstance(node, FuncDef) and not node.is_property:
context: Context = node.arguments[i + len(override.bound_args)]
else:
context = node
self.msg.argument_incompatible_with_supertype(
i + 1,
name,
type_name,
name_in_super,
original_arg_type,
supertype,
context,
secondary_context=node,
)
emitted_msg = True
if not is_subtype(erase_override(override.ret_type), original.ret_type):
self.msg.return_type_incompatible_with_supertype(
name, name_in_super, supertype, original.ret_type, override.ret_type, node
)
emitted_msg = True
elif isinstance(override, Overloaded) and isinstance(original, Overloaded):
# Give a more detailed message in the case where the user is trying to
# override an overload, and the subclass's overload is plausible, except
                # that the order of the variants is wrong.
#
# For example, if the parent defines the overload f(int) -> int and f(str) -> str
# (in that order), and if the child swaps the two and does f(str) -> str and
                # f(int) -> int, we point out that the variants are in an incompatible order.
order = []
for child_variant in override.items:
for i, parent_variant in enumerate(original.items):
if is_subtype(child_variant, parent_variant):
order.append(i)
break
if len(order) == len(original.items) and order != sorted(order):
self.msg.overload_signature_incompatible_with_supertype(
name, name_in_super, supertype, node
)
emitted_msg = True
if not emitted_msg:
# Fall back to generic incompatibility message.
self.msg.signature_incompatible_with_supertype(
name, name_in_super, supertype, node, original=original, override=override
)
if op_method_wider_note:
self.note(
"Overloaded operator methods can't have wider argument types in overrides",
node,
code=codes.OVERRIDE,
)
def check__exit__return_type(self, defn: FuncItem) -> None:
"""Generate error if the return type of __exit__ is problematic.
If __exit__ always returns False but the return type is declared
as bool, mypy thinks that a with statement may "swallow"
exceptions even though this is not the case, resulting in
invalid reachability inference.
"""
if not defn.type or not isinstance(defn.type, CallableType):
return
ret_type = get_proper_type(defn.type.ret_type)
if not has_bool_item(ret_type):
return
returns = all_return_statements(defn)
if not returns:
return
if all(
isinstance(ret.expr, NameExpr) and ret.expr.fullname == "builtins.False"
for ret in returns
):
self.msg.incorrect__exit__return(defn)
def visit_class_def(self, defn: ClassDef) -> None:
"""Type check a class definition."""
typ = defn.info
for base in typ.mro[1:]:
if base.is_final:
self.fail(message_registry.CANNOT_INHERIT_FROM_FINAL.format(base.name), defn)
with self.tscope.class_scope(defn.info), self.enter_partial_types(is_class=True):
old_binder = self.binder
self.binder = ConditionalTypeBinder()
with self.binder.top_frame_context():
with self.scope.push_class(defn.info):
self.accept(defn.defs)
self.binder = old_binder
if not (defn.info.typeddict_type or defn.info.tuple_type or defn.info.is_enum):
# If it is not a normal class (not a special form) check class keywords.
self.check_init_subclass(defn)
if not defn.has_incompatible_baseclass:
# Otherwise we've already found errors; more errors are not useful
self.check_multiple_inheritance(typ)
self.check_metaclass_compatibility(typ)
self.check_final_deletable(typ)
if defn.decorators:
sig: Type = type_object_type(defn.info, self.named_type)
# Decorators are applied in reverse order.
for decorator in reversed(defn.decorators):
if isinstance(decorator, CallExpr) and isinstance(
decorator.analyzed, PromoteExpr
):
# _promote is a special type checking related construct.
continue
dec = self.expr_checker.accept(decorator)
temp = self.temp_node(sig, context=decorator)
fullname = None
if isinstance(decorator, RefExpr):
fullname = decorator.fullname or None
# TODO: Figure out how to have clearer error messages.
# (e.g. "class decorator must be a function that accepts a type."
old_allow_abstract_call = self.allow_abstract_call
self.allow_abstract_call = True
sig, _ = self.expr_checker.check_call(
dec, [temp], [nodes.ARG_POS], defn, callable_name=fullname
)
self.allow_abstract_call = old_allow_abstract_call
# TODO: Apply the sig to the actual TypeInfo so we can handle decorators
# that completely swap out the type. (e.g. Callable[[Type[A]], Type[B]])
if typ.defn.type_vars and typ.defn.type_args is None:
for base_inst in typ.bases:
for base_tvar, base_decl_tvar in zip(
base_inst.args, base_inst.type.defn.type_vars
):
if (
isinstance(base_tvar, TypeVarType)
and base_tvar.variance != INVARIANT
and isinstance(base_decl_tvar, TypeVarType)
and base_decl_tvar.variance != base_tvar.variance
):
self.fail(
f'Variance of TypeVar "{base_tvar.name}" incompatible '
"with variance in parent type",
context=defn,
code=codes.TYPE_VAR,
)
if typ.is_protocol and typ.defn.type_vars:
self.check_protocol_variance(defn)
if not defn.has_incompatible_baseclass and defn.info.is_enum:
self.check_enum(defn)
infer_class_variances(defn.info)
def check_final_deletable(self, typ: TypeInfo) -> None:
# These checks are only for mypyc. Only perform some checks that are easier
# to implement here than in mypyc.
for attr in typ.deletable_attributes:
node = typ.names.get(attr)
if node and isinstance(node.node, Var) and node.node.is_final:
self.fail(message_registry.CANNOT_MAKE_DELETABLE_FINAL, node.node)
def check_init_subclass(self, defn: ClassDef) -> None:
"""Check that keywords in a class definition are valid arguments for __init_subclass__().
In this example:
1 class Base:
2 def __init_subclass__(cls, thing: int):
3 pass
4 class Child(Base, thing=5):
5 def __init_subclass__(cls):
6 pass
7 Child()
Base.__init_subclass__(thing=5) is called at line 4. This is what we simulate here.
Child.__init_subclass__ is never called.
"""
if defn.info.metaclass_type and defn.info.metaclass_type.type.fullname not in (
"builtins.type",
"abc.ABCMeta",
):
# We can't safely check situations when both __init_subclass__ and a custom
# metaclass are present.
return
# At runtime, only Base.__init_subclass__ will be called, so
# we skip the current class itself.
for base in defn.info.mro[1:]:
if "__init_subclass__" not in base.names:
continue
name_expr = NameExpr(defn.name)
name_expr.node = base
callee = MemberExpr(name_expr, "__init_subclass__")
args = list(defn.keywords.values())
arg_names: list[str | None] = list(defn.keywords.keys())
# 'metaclass' keyword is consumed by the rest of the type machinery,
# and is never passed to __init_subclass__ implementations
if "metaclass" in arg_names:
idx = arg_names.index("metaclass")
arg_names.pop(idx)
args.pop(idx)
arg_kinds = [ARG_NAMED] * len(args)
call_expr = CallExpr(callee, args, arg_kinds, arg_names)
call_expr.line = defn.line
call_expr.column = defn.column
call_expr.end_line = defn.end_line
self.expr_checker.accept(call_expr, allow_none_return=True, always_allow_any=True)
# We are only interested in the first Base having __init_subclass__,
# all other bases have already been checked.
break
def check_enum(self, defn: ClassDef) -> None:
assert defn.info.is_enum
if defn.info.fullname not in ENUM_BASES:
for sym in defn.info.names.values():
if (
isinstance(sym.node, Var)
and sym.node.has_explicit_value
and sym.node.name == "__members__"
):
# `__members__` will always be overwritten by `Enum` and is considered
# read-only so we disallow assigning a value to it
self.fail(message_registry.ENUM_MEMBERS_ATTR_WILL_BE_OVERRIDEN, sym.node)
for base in defn.info.mro[1:-1]: # we don't need self and `object`
if base.is_enum and base.fullname not in ENUM_BASES:
self.check_final_enum(defn, base)
self.check_enum_bases(defn)
self.check_enum_new(defn)
def check_final_enum(self, defn: ClassDef, base: TypeInfo) -> None:
for sym in base.names.values():
if self.is_final_enum_value(sym):
self.fail(f'Cannot extend enum with existing members: "{base.name}"', defn)
break
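        # For illustration, the kind of subclassing reported above (hypothetical enums):
        #
        #     class Color(enum.Enum):
        #         RED = 1
        #     class MoreColor(Color):  # error: Cannot extend enum with existing members: "Color"
        #         BLUE = 2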
def is_final_enum_value(self, sym: SymbolTableNode) -> bool:
if isinstance(sym.node, (FuncBase, Decorator)):
return False # A method is fine
if not isinstance(sym.node, Var):
return True # Can be a class or anything else
# Now, only `Var` is left, we need to check:
# 1. Private name like in `__prop = 1`
# 2. Dunder name like `__hash__ = some_hasher`
# 3. Sunder name like `_order_ = 'a, b, c'`
# 4. If it is a method / descriptor like in `method = classmethod(func)`
if (
is_private(sym.node.name)
or is_dunder(sym.node.name)
or is_sunder(sym.node.name)
# TODO: make sure that `x = @class/staticmethod(func)`
# and `x = property(prop)` both work correctly.
# Now they are incorrectly counted as enum members.
or isinstance(get_proper_type(sym.node.type), FunctionLike)
):
return False
return self.is_stub or sym.node.has_explicit_value
def check_enum_bases(self, defn: ClassDef) -> None:
"""
Non-enum mixins cannot appear after enum bases; this is disallowed at runtime:
class Foo: ...
class Bar(enum.Enum, Foo): ...
But any number of enum mixins can appear in a class definition
(even if multiple enum bases define __new__). So this is fine:
class Foo(enum.Enum):
def __new__(cls, val): ...
class Bar(enum.Enum):
def __new__(cls, val): ...
class Baz(int, Foo, Bar, enum.Flag): ...
"""
enum_base: Instance | None = None
for base in defn.info.bases:
if enum_base is None and base.type.is_enum:
enum_base = base
continue
elif enum_base is not None and not base.type.is_enum:
self.fail(
f'No non-enum mixin classes are allowed after "{enum_base.str_with_options(self.options)}"',
defn,
)
break
def check_enum_new(self, defn: ClassDef) -> None:
def has_new_method(info: TypeInfo) -> bool:
new_method = info.get("__new__")
return bool(
new_method
and new_method.node
and new_method.node.fullname != "builtins.object.__new__"
)
has_new = False
for base in defn.info.bases:
candidate = False
if base.type.is_enum:
# If we have an `Enum`, then we need to check all its bases.
candidate = any(not b.is_enum and has_new_method(b) for b in base.type.mro[1:-1])
else:
candidate = has_new_method(base.type)
if candidate and has_new:
self.fail(
"Only a single data type mixin is allowed for Enum subtypes, "
'found extra "{}"'.format(base.str_with_options(self.options)),
defn,
)
elif candidate:
has_new = True
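        # For illustration, a hypothetical definition this loop reports: both int and
        # str contribute a __new__, so two data type mixins are combined with Enum:
        #
        #     class Bad(int, str, enum.Enum):  # error: extra data type mixin "str"
        #         A = "1"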
def check_protocol_variance(self, defn: ClassDef) -> None:
"""Check that protocol definition is compatible with declared
variances of type variables.
Note that we also prohibit declaring protocol classes as invariant
if they are actually covariant/contravariant, since this may break
transitivity of subtyping, see PEP 544.
"""
if defn.type_args is not None:
# Using new-style syntax (PEP 695), so variance will be inferred
return
info = defn.info
object_type = Instance(info.mro[-1], [])
tvars = info.defn.type_vars
for i, tvar in enumerate(tvars):
if not isinstance(tvar, TypeVarType):
# Variance of TypeVarTuple and ParamSpec is underspecified by PEPs.
continue
up_args: list[Type] = [
object_type if i == j else AnyType(TypeOfAny.special_form)
for j, _ in enumerate(tvars)
]
down_args: list[Type] = [
UninhabitedType() if i == j else AnyType(TypeOfAny.special_form)
for j, _ in enumerate(tvars)
]
up, down = Instance(info, up_args), Instance(info, down_args)
# TODO: add advanced variance checks for recursive protocols
if is_subtype(down, up, ignore_declared_variance=True):
expected = COVARIANT
elif is_subtype(up, down, ignore_declared_variance=True):
expected = CONTRAVARIANT
else:
expected = INVARIANT
if expected != tvar.variance:
self.msg.bad_proto_variance(tvar.variance, tvar.name, expected, defn)
def check_multiple_inheritance(self, typ: TypeInfo) -> None:
"""Check for multiple inheritance related errors."""
if len(typ.bases) <= 1:
# No multiple inheritance.
return
# Verify that inherited attributes are compatible.
mro = typ.mro[1:]
for i, base in enumerate(mro):
# Attributes defined in both the type and base are skipped.
# Normal checks for attribute compatibility should catch any problems elsewhere.
non_overridden_attrs = base.names.keys() - typ.names.keys()
for name in non_overridden_attrs:
if is_private(name):
continue
for base2 in mro[i + 1 :]:
# We only need to check compatibility of attributes from classes not
# in a subclass relationship. For subclasses, normal (single inheritance)
# checks suffice (these are implemented elsewhere).
if name in base2.names and base2 not in base.mro:
self.check_compatibility(name, base, base2, typ)
def determine_type_of_member(self, sym: SymbolTableNode) -> Type | None:
if sym.type is not None:
return sym.type
if isinstance(sym.node, FuncBase):
return self.function_type(sym.node)
if isinstance(sym.node, TypeInfo):
if sym.node.typeddict_type:
# We special-case TypedDict, because they don't define any constructor.
return self.expr_checker.typeddict_callable(sym.node)
else:
return type_object_type(sym.node, self.named_type)
if isinstance(sym.node, TypeVarExpr):
# Use of TypeVars is rejected in an expression/runtime context, so
# we don't need to check supertype compatibility for them.
return AnyType(TypeOfAny.special_form)
if isinstance(sym.node, TypeAlias):
with self.msg.filter_errors():
# Suppress any errors, they will be given when analyzing the corresponding node.
# Here we may have incorrect options and location context.
return self.expr_checker.alias_type_in_runtime_context(sym.node, ctx=sym.node)
# TODO: handle more node kinds here.
return None
def check_compatibility(
self, name: str, base1: TypeInfo, base2: TypeInfo, ctx: TypeInfo
) -> None:
"""Check if attribute name in base1 is compatible with base2 in multiple inheritance.
Assume base1 comes before base2 in the MRO, and that base1 and base2 don't have
a direct subclass relationship (i.e., the compatibility requirement only derives from
multiple inheritance).
This check verifies that a definition taken from base1 (and mapped to the current
class ctx), is type compatible with the definition taken from base2 (also mapped), so
that unsafe subclassing like this can be detected:
class A(Generic[T]):
def foo(self, x: T) -> None: ...
class B:
def foo(self, x: str) -> None: ...
class C(B, A[int]): ... # this is unsafe because...
x: A[int] = C()
x.foo # ...runtime type is (str) -> None, while static type is (int) -> None
"""
if name in ("__init__", "__new__", "__init_subclass__"):
# __init__ and friends can be incompatible -- it's a special case.
return
first = base1.names[name]
second = base2.names[name]
first_type = get_proper_type(self.determine_type_of_member(first))
second_type = get_proper_type(self.determine_type_of_member(second))
# TODO: use more principled logic to decide is_subtype() vs is_equivalent().
# We should rely on mutability of superclass node, not on types being Callable.
# start with the special case that Instance can be a subtype of FunctionLike
call = None
if isinstance(first_type, Instance):
call = find_member("__call__", first_type, first_type, is_operator=True)
if call and isinstance(second_type, FunctionLike):
second_sig = self.bind_and_map_method(second, second_type, ctx, base2)
ok = is_subtype(call, second_sig, ignore_pos_arg_names=True)
elif isinstance(first_type, FunctionLike) and isinstance(second_type, FunctionLike):
if first_type.is_type_obj() and second_type.is_type_obj():
# For class objects only check the subtype relationship of the classes,
# since we allow incompatible overrides of '__init__'/'__new__'
ok = is_subtype(
left=fill_typevars_with_any(first_type.type_object()),
right=fill_typevars_with_any(second_type.type_object()),
)
else:
# First bind/map method types when necessary.
first_sig = self.bind_and_map_method(first, first_type, ctx, base1)
second_sig = self.bind_and_map_method(second, second_type, ctx, base2)
ok = is_subtype(first_sig, second_sig, ignore_pos_arg_names=True)
elif first_type and second_type:
if isinstance(first.node, Var):
first_type = expand_self_type(first.node, first_type, fill_typevars(ctx))
if isinstance(second.node, Var):
second_type = expand_self_type(second.node, second_type, fill_typevars(ctx))
ok = is_equivalent(first_type, second_type)
if not ok:
second_node = base2[name].node
if (
isinstance(second_type, FunctionLike)
and second_node is not None
and is_property(second_node)
):
second_type = get_property_type(second_type)
ok = is_subtype(first_type, second_type)
else:
if first_type is None:
self.msg.cannot_determine_type_in_base(name, base1.name, ctx)
if second_type is None:
self.msg.cannot_determine_type_in_base(name, base2.name, ctx)
ok = True
# Final attributes can never be overridden, but can override
# non-final read-only attributes.
if is_final_node(second.node) and not is_private(name):
self.msg.cant_override_final(name, base2.name, ctx)
if is_final_node(first.node):
self.check_if_final_var_override_writable(name, second.node, ctx)
# Some attributes like __slots__ and __deletable__ are special, and the type can
# vary across class hierarchy.
if isinstance(second.node, Var) and second.node.allow_incompatible_override:
ok = True
if not ok:
self.msg.base_class_definitions_incompatible(name, base1, base2, ctx)
def check_metaclass_compatibility(self, typ: TypeInfo) -> None:
"""Ensures that metaclasses of all parent types are compatible."""
if (
typ.is_metaclass()
or typ.is_protocol
or typ.is_named_tuple
or typ.is_enum
or typ.typeddict_type is not None
):
return # Reasonable exceptions from this check
metaclasses = [
entry.metaclass_type
for entry in typ.mro[1:-1]
if entry.metaclass_type
and not is_named_instance(entry.metaclass_type, "builtins.type")
]
if not metaclasses:
return
if typ.metaclass_type is not None and all(
is_subtype(typ.metaclass_type, meta) for meta in metaclasses
):
return
self.fail(
"Metaclass conflict: the metaclass of a derived class must be "
"a (non-strict) subclass of the metaclasses of all its bases",
typ,
)
def visit_import_from(self, node: ImportFrom) -> None:
self.check_import(node)
def visit_import_all(self, node: ImportAll) -> None:
self.check_import(node)
def visit_import(self, node: Import) -> None:
self.check_import(node)
def check_import(self, node: ImportBase) -> None:
for assign in node.assignments:
lvalue = assign.lvalues[0]
lvalue_type, _, __ = self.check_lvalue(lvalue)
if lvalue_type is None:
# TODO: This is broken.
lvalue_type = AnyType(TypeOfAny.special_form)
assert isinstance(assign.rvalue, NameExpr)
message = message_registry.INCOMPATIBLE_IMPORT_OF.format(assign.rvalue.name)
self.check_simple_assignment(
lvalue_type,
assign.rvalue,
node,
msg=message,
lvalue_name="local name",
rvalue_name="imported name",
)
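            # For illustration, the mismatch reported above, assuming a hypothetical
            # module "m" whose attribute "x" is a str:
            #
            #     x: int
            #     from m import x  # error: Incompatible import of "x"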
#
# Statements
#
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
# This block was marked as being unreachable during semantic analysis.
# It turns out any blocks marked in this way are *intentionally* marked
# as unreachable -- so we don't display an error.
self.binder.unreachable()
return
for s in b.body:
if self.binder.is_unreachable():
if not self.should_report_unreachable_issues():
break
if not self.is_noop_for_reachability(s):
self.msg.unreachable_statement(s)
break
else:
self.accept(s)
def should_report_unreachable_issues(self) -> bool:
return (
self.in_checked_function()
and self.options.warn_unreachable
and not self.current_node_deferred
and not self.binder.is_unreachable_warning_suppressed()
)
def is_noop_for_reachability(self, s: Statement) -> bool:
"""Returns 'true' if the given statement either throws an error of some kind
or is a no-op.
We use this function while handling the '--warn-unreachable' flag. When
that flag is present, we normally report an error on any unreachable statement.
But if that statement is just something like a 'pass' or a just-in-case 'assert False',
reporting an error would be annoying.
"""
if isinstance(s, AssertStmt) and is_false_literal(s.expr):
return True
elif isinstance(s, (RaiseStmt, PassStmt)):
return True
elif isinstance(s, ExpressionStmt):
if isinstance(s.expr, EllipsisExpr):
return True
elif isinstance(s.expr, CallExpr):
with self.expr_checker.msg.filter_errors():
typ = get_proper_type(
self.expr_checker.accept(
s.expr, allow_none_return=True, always_allow_any=True
)
)
if isinstance(typ, UninhabitedType):
return True
return False
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
"""Type check an assignment statement.
Handle all kinds of assignment statements (simple, indexed, multiple).
"""
# Avoid type checking type aliases in stubs to avoid false
# positives about modern type syntax available in stubs such
# as X | Y.
if not (s.is_alias_def and self.is_stub):
with self.enter_final_context(s.is_final_def):
self.check_assignment(s.lvalues[-1], s.rvalue, s.type is None, s.new_syntax)
if s.is_alias_def:
self.check_type_alias_rvalue(s)
if (
s.type is not None
and self.options.disallow_any_unimported
and has_any_from_unimported_type(s.type)
):
if isinstance(s.lvalues[-1], TupleExpr):
# This is a multiple assignment. Instead of figuring out which type is problematic,
# give a generic error message.
self.msg.unimported_type_becomes_any(
"A type on this line", AnyType(TypeOfAny.special_form), s
)
else:
self.msg.unimported_type_becomes_any("Type of variable", s.type, s)
check_for_explicit_any(s.type, self.options, self.is_typeshed_stub, self.msg, context=s)
if len(s.lvalues) > 1:
# Chained assignment (e.g. x = y = ...).
# Make sure that rvalue type will not be reinferred.
if not self.has_type(s.rvalue):
self.expr_checker.accept(s.rvalue)
rvalue = self.temp_node(self.lookup_type(s.rvalue), s)
for lv in s.lvalues[:-1]:
with self.enter_final_context(s.is_final_def):
self.check_assignment(lv, rvalue, s.type is None)
self.check_final(s)
if (
s.is_final_def
and s.type
and not has_no_typevars(s.type)
and self.scope.active_class() is not None
):
self.fail(message_registry.DEPENDENT_FINAL_IN_CLASS_BODY, s)
if s.unanalyzed_type and not self.in_checked_function():
self.msg.annotation_in_unchecked_function(context=s)
def check_type_alias_rvalue(self, s: AssignmentStmt) -> None:
with self.msg.filter_errors():
alias_type = self.expr_checker.accept(s.rvalue)
self.store_type(s.lvalues[-1], alias_type)
def check_assignment(
self,
lvalue: Lvalue,
rvalue: Expression,
infer_lvalue_type: bool = True,
new_syntax: bool = False,
) -> None:
"""Type check a single assignment: lvalue = rvalue."""
if isinstance(lvalue, (TupleExpr, ListExpr)):
self.check_assignment_to_multiple_lvalues(
lvalue.items, rvalue, rvalue, infer_lvalue_type
)
else:
self.try_infer_partial_generic_type_from_assignment(lvalue, rvalue, "=")
lvalue_type, index_lvalue, inferred = self.check_lvalue(lvalue)
# If we're assigning to __getattr__ or similar methods, check that the signature is
# valid.
if isinstance(lvalue, NameExpr) and lvalue.node:
name = lvalue.node.name
if name in ("__setattr__", "__getattribute__", "__getattr__"):
# If an explicit type is given, use that.
if lvalue_type:
signature = lvalue_type
else:
signature = self.expr_checker.accept(rvalue)
if signature:
if name == "__setattr__":
self.check_setattr_method(signature, lvalue)
else:
self.check_getattr_method(signature, lvalue, name)
if name == "__slots__":
typ = lvalue_type or self.expr_checker.accept(rvalue)
self.check_slots_definition(typ, lvalue)
if name == "__match_args__" and inferred is not None:
typ = self.expr_checker.accept(rvalue)
self.check_match_args(inferred, typ, lvalue)
if name == "__post_init__":
active_class = self.scope.active_class()
if active_class and dataclasses_plugin.is_processed_dataclass(active_class):
self.fail(message_registry.DATACLASS_POST_INIT_MUST_BE_A_FUNCTION, rvalue)
# Defer PartialType's super type checking.
if (
isinstance(lvalue, RefExpr)
and not (isinstance(lvalue_type, PartialType) and lvalue_type.type is None)
and not (isinstance(lvalue, NameExpr) and lvalue.name == "__match_args__")
):
if self.check_compatibility_all_supers(lvalue, lvalue_type, rvalue):
# We hit an error on this line; don't check for any others
return
if isinstance(lvalue, MemberExpr) and lvalue.name == "__match_args__":
self.fail(message_registry.CANNOT_MODIFY_MATCH_ARGS, lvalue)
if lvalue_type:
if isinstance(lvalue_type, PartialType) and lvalue_type.type is None:
# Try to infer a proper type for a variable with a partial None type.
rvalue_type = self.expr_checker.accept(rvalue)
if isinstance(get_proper_type(rvalue_type), NoneType):
# This doesn't actually provide any additional information -- multiple
# None initializers preserve the partial None type.
return
var = lvalue_type.var
if is_valid_inferred_type(rvalue_type, is_lvalue_final=var.is_final):
partial_types = self.find_partial_types(var)
if partial_types is not None:
if not self.current_node_deferred:
# Partial type can't be final, so strip any literal values.
rvalue_type = remove_instance_last_known_values(rvalue_type)
inferred_type = make_simplified_union([rvalue_type, NoneType()])
self.set_inferred_type(var, lvalue, inferred_type)
else:
var.type = None
del partial_types[var]
lvalue_type = var.type
else:
# Try to infer a partial type. No need to check the return value, as
# an error will be reported elsewhere.
self.infer_partial_type(lvalue_type.var, lvalue, rvalue_type)
# Handle None PartialType's super type checking here, after it's resolved.
if isinstance(lvalue, RefExpr) and self.check_compatibility_all_supers(
lvalue, lvalue_type, rvalue
):
# We hit an error on this line; don't check for any others
return
elif (
is_literal_none(rvalue)
and isinstance(lvalue, NameExpr)
and isinstance(lvalue.node, Var)
and lvalue.node.is_initialized_in_class
and not new_syntax
):
# Allow None's to be assigned to class variables with non-Optional types.
rvalue_type = lvalue_type
elif (
isinstance(lvalue, MemberExpr) and lvalue.kind is None
): # Ignore member access to modules
instance_type = self.expr_checker.accept(lvalue.expr)
rvalue_type, lvalue_type, infer_lvalue_type = self.check_member_assignment(
instance_type, lvalue_type, rvalue, context=rvalue
)
else:
# Hacky special case for assigning a literal None
# to a variable defined in a previous if
# branch. When we detect this, we'll go back and
# make the type optional. This is somewhat
# unpleasant, and a generalization of this would
# be an improvement!
if (
is_literal_none(rvalue)
and isinstance(lvalue, NameExpr)
and lvalue.kind == LDEF
and isinstance(lvalue.node, Var)
and lvalue.node.type
and lvalue.node in self.var_decl_frames
and not isinstance(get_proper_type(lvalue_type), AnyType)
):
decl_frame_map = self.var_decl_frames[lvalue.node]
# Check if the nearest common ancestor frame for the definition site
# and the current site is the enclosing frame of an if/elif/else block.
has_if_ancestor = False
for frame in reversed(self.binder.frames):
if frame.id in decl_frame_map:
has_if_ancestor = frame.conditional_frame
break
if has_if_ancestor:
lvalue_type = make_optional_type(lvalue_type)
self.set_inferred_type(lvalue.node, lvalue, lvalue_type)
rvalue_type = self.check_simple_assignment(lvalue_type, rvalue, context=rvalue)
# Special case: only non-abstract non-protocol classes can be assigned to
# variables with explicit type Type[A], where A is protocol or abstract.
p_rvalue_type = get_proper_type(rvalue_type)
p_lvalue_type = get_proper_type(lvalue_type)
if (
isinstance(p_rvalue_type, FunctionLike)
and p_rvalue_type.is_type_obj()
and (
p_rvalue_type.type_object().is_abstract
or p_rvalue_type.type_object().is_protocol
)
and isinstance(p_lvalue_type, TypeType)
and isinstance(p_lvalue_type.item, Instance)
and (
p_lvalue_type.item.type.is_abstract or p_lvalue_type.item.type.is_protocol
)
):
self.msg.concrete_only_assign(p_lvalue_type, rvalue)
return
if rvalue_type and infer_lvalue_type and not isinstance(lvalue_type, PartialType):
# Don't use type binder for definitions of special forms, like named tuples.
if not (isinstance(lvalue, NameExpr) and lvalue.is_special_form):
self.binder.assign_type(lvalue, rvalue_type, lvalue_type, False)
elif index_lvalue:
self.check_indexed_assignment(index_lvalue, rvalue, lvalue)
if inferred:
type_context = self.get_variable_type_context(inferred)
rvalue_type = self.expr_checker.accept(rvalue, type_context=type_context)
if not (
inferred.is_final
or (isinstance(lvalue, NameExpr) and lvalue.name == "__match_args__")
):
rvalue_type = remove_instance_last_known_values(rvalue_type)
self.infer_variable_type(inferred, lvalue, rvalue_type, rvalue)
self.check_assignment_to_slots(lvalue)
# (type, operator) tuples for augmented assignments supported with partial types
partial_type_augmented_ops: Final = {("builtins.list", "+"), ("builtins.set", "|")}
def get_variable_type_context(self, inferred: Var) -> Type | None:
type_contexts = []
if inferred.info:
for base in inferred.info.mro[1:]:
base_type, base_node = self.lvalue_type_from_base(inferred, base)
if (
base_type
and not (isinstance(base_node, Var) and base_node.invalid_partial_type)
and not isinstance(base_type, PartialType)
):
type_contexts.append(base_type)
# Use most derived supertype as type context if available.
if not type_contexts:
return None
candidate = type_contexts[0]
for other in type_contexts:
if is_proper_subtype(other, candidate):
candidate = other
elif not is_subtype(candidate, other):
# Multiple incompatible candidates, cannot use any of them as context.
return None
return candidate
def try_infer_partial_generic_type_from_assignment(
self, lvalue: Lvalue, rvalue: Expression, op: str
) -> None:
"""Try to infer a precise type for partial generic type from assignment.
'op' is '=' for normal assignment and a binary operator ('+', ...) for
augmented assignment.
Example where this happens:
x = []
if foo():
x = [1] # Infer List[int] as type of 'x'
"""
var = None
if (
isinstance(lvalue, NameExpr)
and isinstance(lvalue.node, Var)
and isinstance(lvalue.node.type, PartialType)
):
var = lvalue.node
elif isinstance(lvalue, MemberExpr):
var = self.expr_checker.get_partial_self_var(lvalue)
if var is not None:
typ = var.type
assert isinstance(typ, PartialType)
if typ.type is None:
return
# Return if this is an unsupported augmented assignment.
if op != "=" and (typ.type.fullname, op) not in self.partial_type_augmented_ops:
return
# TODO: some logic here duplicates the None partial type counterpart
# inlined in check_assignment(), see #8043.
partial_types = self.find_partial_types(var)
if partial_types is None:
return
rvalue_type = self.expr_checker.accept(rvalue)
rvalue_type = get_proper_type(rvalue_type)
if isinstance(rvalue_type, Instance):
if rvalue_type.type == typ.type and is_valid_inferred_type(rvalue_type):
var.type = rvalue_type
del partial_types[var]
elif isinstance(rvalue_type, AnyType):
var.type = fill_typevars_with_any(typ.type)
del partial_types[var]
def check_compatibility_all_supers(
self, lvalue: RefExpr, lvalue_type: Type | None, rvalue: Expression
) -> bool:
lvalue_node = lvalue.node
# Check if we are a class variable with at least one base class
if (
isinstance(lvalue_node, Var)
and lvalue.kind in (MDEF, None)
and len(lvalue_node.info.bases) > 0 # None for Vars defined via self
):
for base in lvalue_node.info.mro[1:]:
tnode = base.names.get(lvalue_node.name)
if tnode is not None:
if not self.check_compatibility_classvar_super(lvalue_node, base, tnode.node):
# Show only one error per variable
break
if not self.check_compatibility_final_super(lvalue_node, base, tnode.node):
# Show only one error per variable
break
direct_bases = lvalue_node.info.direct_base_classes()
last_immediate_base = direct_bases[-1] if direct_bases else None
for base in lvalue_node.info.mro[1:]:
# The type of "__slots__" and some other attributes usually doesn't need to
# be compatible with a base class. We'll still check the type of "__slots__"
# against "object" as an exception.
if lvalue_node.allow_incompatible_override and not (
lvalue_node.name == "__slots__" and base.fullname == "builtins.object"
):
continue
if is_private(lvalue_node.name):
continue
base_type, base_node = self.lvalue_type_from_base(lvalue_node, base)
if isinstance(base_type, PartialType):
base_type = None
if base_type:
assert base_node is not None
if not self.check_compatibility_super(
lvalue, lvalue_type, rvalue, base, base_type, base_node
):
                        # Only show one error per variable, even if other
                        # base classes are also incompatible.
return True
if base is last_immediate_base:
# At this point, the attribute was found to be compatible with all
# immediate parents.
break
return False
def check_compatibility_super(
self,
lvalue: RefExpr,
lvalue_type: Type | None,
rvalue: Expression,
base: TypeInfo,
base_type: Type,
base_node: Node,
) -> bool:
lvalue_node = lvalue.node
assert isinstance(lvalue_node, Var)
# Do not check whether the rvalue is compatible if the
# lvalue had a type defined; this is handled by other
# parts, and all we have to worry about in that case is
# that lvalue is compatible with the base class.
compare_node = None
if lvalue_type:
compare_type = lvalue_type
compare_node = lvalue.node
else:
compare_type = self.expr_checker.accept(rvalue, base_type)
if isinstance(rvalue, NameExpr):
compare_node = rvalue.node
if isinstance(compare_node, Decorator):
compare_node = compare_node.func
base_type = get_proper_type(base_type)
compare_type = get_proper_type(compare_type)
if compare_type:
if isinstance(base_type, CallableType) and isinstance(compare_type, CallableType):
base_static = is_node_static(base_node)
compare_static = is_node_static(compare_node)
# In case compare_static is unknown, also check
# if 'definition' is set. The most common case for
# this is with TempNode(), where we lose all
# information about the real rvalue node (but only get
# the rvalue type)
if compare_static is None and compare_type.definition:
compare_static = is_node_static(compare_type.definition)
# Compare against False, as is_node_static can return None
if base_static is False and compare_static is False:
# Class-level function objects and classmethods become bound
# methods: the former to the instance, the latter to the
# class
base_type = bind_self(base_type, self.scope.active_self_type())
compare_type = bind_self(compare_type, self.scope.active_self_type())
# If we are a static method, ensure to also tell the
# lvalue it now contains a static method
if base_static and compare_static:
lvalue_node.is_staticmethod = True
ok = self.check_subtype(
compare_type,
base_type,
rvalue,
message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT,
"expression has type",
f'base class "{base.name}" defined the type as',
)
if (
ok
and codes.MUTABLE_OVERRIDE in self.options.enabled_error_codes
and self.is_writable_attribute(base_node)
):
ok = self.check_subtype(
base_type,
compare_type,
rvalue,
message_registry.COVARIANT_OVERRIDE_OF_MUTABLE_ATTRIBUTE,
f'base class "{base.name}" defined the type as',
"expression has type",
)
return ok
return True
def lvalue_type_from_base(
self, expr_node: Var, base: TypeInfo
) -> tuple[Type | None, Node | None]:
"""For a NameExpr that is part of a class, walk all base classes and try
to find the first class that defines a Type for the same name."""
expr_name = expr_node.name
base_var = base.names.get(expr_name)
if base_var:
base_node = base_var.node
base_type = base_var.type
if isinstance(base_node, Var) and base_type is not None:
base_type = expand_self_type(base_node, base_type, fill_typevars(expr_node.info))
if isinstance(base_node, Decorator):
base_node = base_node.func
base_type = base_node.type
if base_type:
if not has_no_typevars(base_type):
self_type = self.scope.active_self_type()
assert self_type is not None, "Internal error: base lookup outside class"
if isinstance(self_type, TupleType):
instance = tuple_fallback(self_type)
else:
instance = self_type
itype = map_instance_to_supertype(instance, base)
base_type = expand_type_by_instance(base_type, itype)
base_type = get_proper_type(base_type)
if isinstance(base_type, CallableType) and isinstance(base_node, FuncDef):
# If we are a property, return the Type of the return
# value, not the Callable
if base_node.is_property:
base_type = get_proper_type(base_type.ret_type)
if isinstance(base_type, FunctionLike) and isinstance(
base_node, OverloadedFuncDef
):
# Same for properties with setter
if base_node.is_property:
base_type = base_type.items[0].ret_type
return base_type, base_node
return None, None
def check_compatibility_classvar_super(
self, node: Var, base: TypeInfo, base_node: Node | None
) -> bool:
if not isinstance(base_node, Var):
return True
if node.is_classvar and not base_node.is_classvar:
self.fail(message_registry.CANNOT_OVERRIDE_INSTANCE_VAR.format(base.name), node)
return False
elif not node.is_classvar and base_node.is_classvar:
self.fail(message_registry.CANNOT_OVERRIDE_CLASS_VAR.format(base.name), node)
return False
return True
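        # For illustration, a hypothetical override this method rejects: an instance
        # variable in the base class redeclared as a ClassVar in the subclass:
        #
        #     class Base:
        #         x: int = 0
        #     class Sub(Base):
        #         x: ClassVar[int] = 0  # error: cannot override instance variable with class variable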
def check_compatibility_final_super(
self, node: Var, base: TypeInfo, base_node: Node | None
) -> bool:
"""Check if an assignment overrides a final attribute in a base class.
This only checks situations where either a node in base class is not a variable
but a final method, or where override is explicitly declared as final.
In these cases we give a more detailed error message. In addition, we check that
        a final variable doesn't override a writable attribute, which is not safe.
Other situations are checked in `check_final()`.
"""
if not isinstance(base_node, (Var, FuncBase, Decorator)):
return True
if is_private(node.name):
return True
if base_node.is_final and (node.is_final or not isinstance(base_node, Var)):
# Give this error only for explicit override attempt with `Final`, or
# if we are overriding a final method with variable.
# Other override attempts will be flagged as assignment to constant
# in `check_final()`.
self.msg.cant_override_final(node.name, base.name, node)
return False
if node.is_final:
if base.fullname in ENUM_BASES or node.name in ENUM_SPECIAL_PROPS:
return True
self.check_if_final_var_override_writable(node.name, base_node, node)
return True
def check_if_final_var_override_writable(
self, name: str, base_node: Node | None, ctx: Context
) -> None:
"""Check that a final variable doesn't override writeable attribute.
This is done to prevent situations like this:
class C:
attr = 1
class D(C):
attr: Final = 2
x: C = D()
x.attr = 3 # Oops!
"""
writable = True
if base_node:
writable = self.is_writable_attribute(base_node)
if writable:
self.msg.final_cant_override_writable(name, ctx)
def get_final_context(self) -> bool:
"""Check whether we a currently checking a final declaration."""
return self._is_final_def
@contextmanager
def enter_final_context(self, is_final_def: bool) -> Iterator[None]:
"""Store whether the current checked assignment is a final declaration."""
old_ctx = self._is_final_def
self._is_final_def = is_final_def
try:
yield
finally:
self._is_final_def = old_ctx
def check_final(self, s: AssignmentStmt | OperatorAssignmentStmt | AssignmentExpr) -> None:
"""Check if this assignment does not assign to a final attribute.
This function performs the check only for name assignments at module
and class scope. The assignments to `obj.attr` and `Cls.attr` are checked
in checkmember.py.
"""
if isinstance(s, AssignmentStmt):
lvs = self.flatten_lvalues(s.lvalues)
elif isinstance(s, AssignmentExpr):
lvs = [s.target]
else:
lvs = [s.lvalue]
is_final_decl = s.is_final_def if isinstance(s, AssignmentStmt) else False
if is_final_decl and self.scope.active_class():
lv = lvs[0]
assert isinstance(lv, RefExpr)
if lv.node is not None:
assert isinstance(lv.node, Var)
if (
lv.node.final_unset_in_class
and not lv.node.final_set_in_init
and not self.is_stub # It is OK to skip initializer in stub files.
and
# Avoid extra error messages, if there is no type in Final[...],
# then we already reported the error about missing r.h.s.
isinstance(s, AssignmentStmt)
and s.type is not None
):
self.msg.final_without_value(s)
for lv in lvs:
if isinstance(lv, RefExpr) and isinstance(lv.node, Var):
name = lv.node.name
cls = self.scope.active_class()
if cls is not None:
# These additional checks exist to give more error messages
# even if the final attribute was overridden with a new symbol
# (which is itself an error)...
for base in cls.mro[1:]:
sym = base.names.get(name)
# We only give this error if base node is variable,
# overriding final method will be caught in
# `check_compatibility_final_super()`.
if sym and isinstance(sym.node, Var):
if sym.node.is_final and not is_final_decl:
self.msg.cant_assign_to_final(name, sym.node.info is None, s)
# ...but only once
break
if lv.node.is_final and not is_final_decl:
self.msg.cant_assign_to_final(name, lv.node.info is None, s)
def check_assignment_to_slots(self, lvalue: Lvalue) -> None:
if not isinstance(lvalue, MemberExpr):
return
inst = get_proper_type(self.expr_checker.accept(lvalue.expr))
if not isinstance(inst, Instance):
return
if inst.type.slots is None:
return # Slots do not exist, we can allow any assignment
if lvalue.name in inst.type.slots:
return # We are assigning to an existing slot
for base_info in inst.type.mro[:-1]:
if base_info.names.get("__setattr__") is not None:
# When type has `__setattr__` defined,
# we can assign any dynamic value.
# We exclude object, because it always has `__setattr__`.
return
definition = inst.type.get(lvalue.name)
if definition is None:
            # We don't want to report the
            # `"SomeType" has no attribute "some_attr"`
            # error twice.
return
if self.is_assignable_slot(lvalue, definition.type):
return
self.fail(
message_registry.NAME_NOT_IN_SLOTS.format(lvalue.name, inst.type.fullname), lvalue
)
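        # For illustration, a hypothetical assignment rejected above: "y" is declared
        # on the class but is not listed in __slots__, so instances cannot set it:
        #
        #     class Point:
        #         __slots__ = ("x",)
        #         y: int
        #     Point().y = 0  # error: "y" is not in "__slots__" of "Point"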
def is_assignable_slot(self, lvalue: Lvalue, typ: Type | None) -> bool:
if getattr(lvalue, "node", None):
return False # This is a definition
typ = get_proper_type(typ)
if typ is None or isinstance(typ, AnyType):
        return True  # Any can be literally anything, like `@property`
if isinstance(typ, Instance):
# When working with instances, we need to know if they contain
# `__set__` special method. Like `@property` does.
# This makes assigning to properties possible,
# even without extra slot spec.
return typ.type.get("__set__") is not None
if isinstance(typ, FunctionLike):
return True # Can be a property, or some other magic
if isinstance(typ, UnionType):
return all(self.is_assignable_slot(lvalue, u) for u in typ.items)
return False
def flatten_rvalues(self, rvalues: list[Expression]) -> list[Expression]:
"""Flatten expression list by expanding those * items that have tuple type.
For each regular type item in the tuple type use a TempNode(), for an Unpack
item use a corresponding StarExpr(TempNode()).
"""
new_rvalues = []
for rv in rvalues:
if not isinstance(rv, StarExpr):
new_rvalues.append(rv)
continue
typ = get_proper_type(self.expr_checker.accept(rv.expr))
if not isinstance(typ, TupleType):
new_rvalues.append(rv)
continue
for t in typ.items:
if not isinstance(t, UnpackType):
new_rvalues.append(TempNode(t))
else:
unpacked = get_proper_type(t.type)
if isinstance(unpacked, TypeVarTupleType):
fallback = unpacked.upper_bound
else:
assert (
isinstance(unpacked, Instance)
and unpacked.type.fullname == "builtins.tuple"
)
fallback = unpacked
new_rvalues.append(StarExpr(TempNode(fallback)))
return new_rvalues
def check_assignment_to_multiple_lvalues(
self,
lvalues: list[Lvalue],
rvalue: Expression,
context: Context,
infer_lvalue_type: bool = True,
) -> None:
if isinstance(rvalue, (TupleExpr, ListExpr)):
# Recursively go into Tuple or List expression rhs instead of
# using the type of rhs, because this allows more fine-grained
# control in cases like: a, b = [int, str] where rhs would get
# type List[object]
rvalues: list[Expression] = []
iterable_type: Type | None = None
last_idx: int | None = None
for idx_rval, rval in enumerate(self.flatten_rvalues(rvalue.items)):
if isinstance(rval, StarExpr):
typs = get_proper_type(self.expr_checker.accept(rval.expr))
if self.type_is_iterable(typs) and isinstance(typs, Instance):
if iterable_type is not None and iterable_type != self.iterable_item_type(
typs, rvalue
):
self.fail(message_registry.CONTIGUOUS_ITERABLE_EXPECTED, context)
else:
if last_idx is None or last_idx + 1 == idx_rval:
rvalues.append(rval)
last_idx = idx_rval
iterable_type = self.iterable_item_type(typs, rvalue)
else:
self.fail(message_registry.CONTIGUOUS_ITERABLE_EXPECTED, context)
else:
self.fail(message_registry.ITERABLE_TYPE_EXPECTED.format(typs), context)
else:
rvalues.append(rval)
iterable_start: int | None = None
iterable_end: int | None = None
for i, rval in enumerate(rvalues):
if isinstance(rval, StarExpr):
typs = get_proper_type(self.expr_checker.accept(rval.expr))
if self.type_is_iterable(typs) and isinstance(typs, Instance):
if iterable_start is None:
iterable_start = i
iterable_end = i
if (
iterable_start is not None
and iterable_end is not None
and iterable_type is not None
):
iterable_num = iterable_end - iterable_start + 1
rvalue_needed = len(lvalues) - (len(rvalues) - iterable_num)
if rvalue_needed > 0:
rvalues = (
rvalues[0:iterable_start]
+ [TempNode(iterable_type) for i in range(rvalue_needed)]
+ rvalues[iterable_end + 1 :]
)
if self.check_rvalue_count_in_assignment(lvalues, len(rvalues), context):
star_index = next(
(i for i, lv in enumerate(lvalues) if isinstance(lv, StarExpr)), len(lvalues)
)
left_lvs = lvalues[:star_index]
star_lv = (
cast(StarExpr, lvalues[star_index]) if star_index != len(lvalues) else None
)
right_lvs = lvalues[star_index + 1 :]
left_rvs, star_rvs, right_rvs = self.split_around_star(
rvalues, star_index, len(lvalues)
)
lr_pairs = list(zip(left_lvs, left_rvs))
if star_lv:
rv_list = ListExpr(star_rvs)
rv_list.set_line(rvalue)
lr_pairs.append((star_lv.expr, rv_list))
lr_pairs.extend(zip(right_lvs, right_rvs))
for lv, rv in lr_pairs:
self.check_assignment(lv, rv, infer_lvalue_type)
else:
self.check_multi_assignment(lvalues, rvalue, context, infer_lvalue_type)
def check_rvalue_count_in_assignment(
self,
lvalues: list[Lvalue],
rvalue_count: int,
context: Context,
rvalue_unpack: int | None = None,
) -> bool:
if rvalue_unpack is not None:
if not any(isinstance(e, StarExpr) for e in lvalues):
self.fail("Variadic tuple unpacking requires a star target", context)
return False
if len(lvalues) > rvalue_count:
self.fail(message_registry.TOO_MANY_TARGETS_FOR_VARIADIC_UNPACK, context)
return False
left_star_index = next(i for i, lv in enumerate(lvalues) if isinstance(lv, StarExpr))
left_prefix = left_star_index
left_suffix = len(lvalues) - left_star_index - 1
right_prefix = rvalue_unpack
right_suffix = rvalue_count - rvalue_unpack - 1
if left_suffix > right_suffix or left_prefix > right_prefix:
# Case of asymmetric unpack like:
# rv: tuple[int, *Ts, int, int]
# x, y, *xs, z = rv
# it is technically valid, but is tricky to reason about.
# TODO: support this (at least if the r.h.s. unpack is a homogeneous tuple).
self.fail(message_registry.TOO_MANY_TARGETS_FOR_VARIADIC_UNPACK, context)
return True
if any(isinstance(lvalue, StarExpr) for lvalue in lvalues):
if len(lvalues) - 1 > rvalue_count:
self.msg.wrong_number_values_to_unpack(rvalue_count, len(lvalues) - 1, context)
return False
elif rvalue_count != len(lvalues):
self.msg.wrong_number_values_to_unpack(rvalue_count, len(lvalues), context)
return False
return True
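        # For illustration, the arity mismatches reported above:
        #
        #     a, b, c = 1, 2     # error: need more than 2 values to unpack (3 expected)
        #     a, b = 1, 2, 3     # error: too many values to unpack (2 expected)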
def check_multi_assignment(
self,
lvalues: list[Lvalue],
rvalue: Expression,
context: Context,
infer_lvalue_type: bool = True,
rv_type: Type | None = None,
undefined_rvalue: bool = False,
) -> None:
"""Check the assignment of one rvalue to a number of lvalues."""
# Infer the type of an ordinary rvalue expression.
# TODO: maybe elsewhere; redundant.
rvalue_type = get_proper_type(rv_type or self.expr_checker.accept(rvalue))
if isinstance(rvalue_type, TypeVarLikeType):
rvalue_type = get_proper_type(rvalue_type.upper_bound)
if isinstance(rvalue_type, UnionType):
# If this is an Optional type in non-strict Optional code, unwrap it.
relevant_items = rvalue_type.relevant_items()
if len(relevant_items) == 1:
rvalue_type = get_proper_type(relevant_items[0])
if (
isinstance(rvalue_type, TupleType)
and find_unpack_in_list(rvalue_type.items) is not None
):
# Normalize for consistent handling with "old-style" homogeneous tuples.
rvalue_type = expand_type(rvalue_type, {})
if isinstance(rvalue_type, AnyType):
for lv in lvalues:
if isinstance(lv, StarExpr):
lv = lv.expr
temp_node = self.temp_node(
AnyType(TypeOfAny.from_another_any, source_any=rvalue_type), context
)
self.check_assignment(lv, temp_node, infer_lvalue_type)
elif isinstance(rvalue_type, TupleType):
self.check_multi_assignment_from_tuple(
lvalues, rvalue, rvalue_type, context, undefined_rvalue, infer_lvalue_type
)
elif isinstance(rvalue_type, UnionType):
self.check_multi_assignment_from_union(
lvalues, rvalue, rvalue_type, context, infer_lvalue_type
)
elif isinstance(rvalue_type, Instance) and rvalue_type.type.fullname == "builtins.str":
self.msg.unpacking_strings_disallowed(context)
else:
self.check_multi_assignment_from_iterable(
lvalues, rvalue_type, context, infer_lvalue_type
)
def check_multi_assignment_from_union(
self,
lvalues: list[Expression],
rvalue: Expression,
rvalue_type: UnionType,
context: Context,
infer_lvalue_type: bool,
) -> None:
"""Check assignment to multiple lvalue targets when rvalue type is a Union[...].
For example:
t: Union[Tuple[int, int], Tuple[str, str]]
x, y = t
reveal_type(x) # Union[int, str]
The idea in this case is to process the assignment for every item of the union.
Important note: the types are collected in two places, 'union_types' contains
inferred types for first assignments, 'assignments' contains the narrowed types
for binder.
"""
self.no_partial_types = True
transposed: tuple[list[Type], ...] = tuple([] for _ in self.flatten_lvalues(lvalues))
# Notify binder that we want to defer bindings and instead collect types.
with self.binder.accumulate_type_assignments() as assignments:
for item in rvalue_type.items:
# Type check the assignment separately for each union item and collect
# the inferred lvalue types for each union item.
self.check_multi_assignment(
lvalues,
rvalue,
context,
infer_lvalue_type=infer_lvalue_type,
rv_type=item,
undefined_rvalue=True,
)
for t, lv in zip(transposed, self.flatten_lvalues(lvalues)):
# We can access _type_maps directly since temporary type maps are
# only created within expressions.
t.append(self._type_maps[0].pop(lv, AnyType(TypeOfAny.special_form)))
union_types = tuple(make_simplified_union(col) for col in transposed)
for expr, items in assignments.items():
# Bind a union of types collected in 'assignments' to every expression.
if isinstance(expr, StarExpr):
expr = expr.expr
# TODO: See todo in binder.py, ConditionalTypeBinder.assign_type
# It's unclear why the 'declared_type' param is sometimes 'None'
clean_items: list[tuple[Type, Type]] = []
for type, declared_type in items:
assert declared_type is not None
clean_items.append((type, declared_type))
types, declared_types = zip(*clean_items)
self.binder.assign_type(
expr,
make_simplified_union(list(types)),
make_simplified_union(list(declared_types)),
False,
)
for union, lv in zip(union_types, self.flatten_lvalues(lvalues)):
# Properly store the inferred types.
_1, _2, inferred = self.check_lvalue(lv)
if inferred:
self.set_inferred_type(inferred, lv, union)
else:
self.store_type(lv, union)
self.no_partial_types = False
def flatten_lvalues(self, lvalues: list[Expression]) -> list[Expression]:
res: list[Expression] = []
for lv in lvalues:
if isinstance(lv, (TupleExpr, ListExpr)):
res.extend(self.flatten_lvalues(lv.items))
if isinstance(lv, StarExpr):
# Unwrap StarExpr, since it is unwrapped by other helpers.
lv = lv.expr
res.append(lv)
return res
def check_multi_assignment_from_tuple(
self,
lvalues: list[Lvalue],
rvalue: Expression,
rvalue_type: TupleType,
context: Context,
undefined_rvalue: bool,
infer_lvalue_type: bool = True,
) -> None:
rvalue_unpack = find_unpack_in_list(rvalue_type.items)
if self.check_rvalue_count_in_assignment(
lvalues, len(rvalue_type.items), context, rvalue_unpack=rvalue_unpack
):
star_index = next(
(i for i, lv in enumerate(lvalues) if isinstance(lv, StarExpr)), len(lvalues)
)
left_lvs = lvalues[:star_index]
star_lv = cast(StarExpr, lvalues[star_index]) if star_index != len(lvalues) else None
right_lvs = lvalues[star_index + 1 :]
if not undefined_rvalue:
# Infer rvalue again, now in the correct type context.
lvalue_type = self.lvalue_type_for_inference(lvalues, rvalue_type)
reinferred_rvalue_type = get_proper_type(
self.expr_checker.accept(rvalue, lvalue_type)
)
if isinstance(reinferred_rvalue_type, TypeVarLikeType):
reinferred_rvalue_type = get_proper_type(reinferred_rvalue_type.upper_bound)
if isinstance(reinferred_rvalue_type, UnionType):
# If this is an Optional type in non-strict Optional code, unwrap it.
relevant_items = reinferred_rvalue_type.relevant_items()
if len(relevant_items) == 1:
reinferred_rvalue_type = get_proper_type(relevant_items[0])
if isinstance(reinferred_rvalue_type, UnionType):
self.check_multi_assignment_from_union(
lvalues, rvalue, reinferred_rvalue_type, context, infer_lvalue_type
)
return
if isinstance(reinferred_rvalue_type, AnyType):
# We can get Any if the current node is
# deferred. Doing more inference in deferred nodes
# is hard, so give up for now. We can also get
# here if reinferring types above changes the
# inferred return type for an overloaded function
# to be ambiguous.
return
assert isinstance(reinferred_rvalue_type, TupleType)
rvalue_type = reinferred_rvalue_type
left_rv_types, star_rv_types, right_rv_types = self.split_around_star(
rvalue_type.items, star_index, len(lvalues)
)
for lv, rv_type in zip(left_lvs, left_rv_types):
self.check_assignment(lv, self.temp_node(rv_type, context), infer_lvalue_type)
if star_lv:
list_expr = ListExpr(
[
(
self.temp_node(rv_type, context)
if not isinstance(rv_type, UnpackType)
else StarExpr(self.temp_node(rv_type.type, context))
)
for rv_type in star_rv_types
]
)
list_expr.set_line(context)
self.check_assignment(star_lv.expr, list_expr, infer_lvalue_type)
for lv, rv_type in zip(right_lvs, right_rv_types):
self.check_assignment(lv, self.temp_node(rv_type, context), infer_lvalue_type)
else:
# Store meaningful Any types for lvalues, errors are already given
# by check_rvalue_count_in_assignment()
if infer_lvalue_type:
for lv in lvalues:
if (
isinstance(lv, NameExpr)
and isinstance(lv.node, Var)
and lv.node.type is None
):
lv.node.type = AnyType(TypeOfAny.from_error)
elif isinstance(lv, StarExpr):
if (
isinstance(lv.expr, NameExpr)
and isinstance(lv.expr.node, Var)
and lv.expr.node.type is None
):
lv.expr.node.type = self.named_generic_type(
"builtins.list", [AnyType(TypeOfAny.from_error)]
)
def lvalue_type_for_inference(self, lvalues: list[Lvalue], rvalue_type: TupleType) -> Type:
star_index = next(
(i for i, lv in enumerate(lvalues) if isinstance(lv, StarExpr)), len(lvalues)
)
left_lvs = lvalues[:star_index]
star_lv = cast(StarExpr, lvalues[star_index]) if star_index != len(lvalues) else None
right_lvs = lvalues[star_index + 1 :]
left_rv_types, star_rv_types, right_rv_types = self.split_around_star(
rvalue_type.items, star_index, len(lvalues)
)
type_parameters: list[Type] = []
def append_types_for_inference(lvs: list[Expression], rv_types: list[Type]) -> None:
for lv, rv_type in zip(lvs, rv_types):
sub_lvalue_type, index_expr, inferred = self.check_lvalue(lv)
if sub_lvalue_type and not isinstance(sub_lvalue_type, PartialType):
type_parameters.append(sub_lvalue_type)
else: # index lvalue
# TODO Figure out more precise type context, probably
# based on the type signature of the _set method.
type_parameters.append(rv_type)
append_types_for_inference(left_lvs, left_rv_types)
if star_lv:
sub_lvalue_type, index_expr, inferred = self.check_lvalue(star_lv.expr)
if sub_lvalue_type and not isinstance(sub_lvalue_type, PartialType):
type_parameters.extend([sub_lvalue_type] * len(star_rv_types))
else: # index lvalue
# TODO Figure out more precise type context, probably
# based on the type signature of the _set method.
type_parameters.extend(star_rv_types)
append_types_for_inference(right_lvs, right_rv_types)
return TupleType(type_parameters, self.named_type("builtins.tuple"))
def split_around_star(
self, items: list[T], star_index: int, length: int
) -> tuple[list[T], list[T], list[T]]:
"""Splits a list of items in three to match another list of length 'length'
that contains a starred expression at 'star_index' in the following way:
star_index = 2, length = 5 (i.e., [a,b,*,c,d]), items = [1,2,3,4,5,6,7]
        returns: ([1,2], [3,4,5], [6,7])
"""
nr_right_of_star = length - star_index - 1
right_index = -nr_right_of_star if nr_right_of_star != 0 else len(items)
left = items[:star_index]
star = items[star_index:right_index]
right = items[right_index:]
return left, star, right
def type_is_iterable(self, type: Type) -> bool:
type = get_proper_type(type)
if isinstance(type, FunctionLike) and type.is_type_obj():
type = type.fallback
return is_subtype(
type, self.named_generic_type("typing.Iterable", [AnyType(TypeOfAny.special_form)])
)
def check_multi_assignment_from_iterable(
self,
lvalues: list[Lvalue],
rvalue_type: Type,
context: Context,
infer_lvalue_type: bool = True,
) -> None:
rvalue_type = get_proper_type(rvalue_type)
if self.type_is_iterable(rvalue_type) and isinstance(
rvalue_type, (Instance, CallableType, TypeType, Overloaded)
):
item_type = self.iterable_item_type(rvalue_type, context)
for lv in lvalues:
if isinstance(lv, StarExpr):
items_type = self.named_generic_type("builtins.list", [item_type])
self.check_assignment(
lv.expr, self.temp_node(items_type, context), infer_lvalue_type
)
else:
self.check_assignment(
lv, self.temp_node(item_type, context), infer_lvalue_type
)
else:
self.msg.type_not_iterable(rvalue_type, context)
def check_lvalue(self, lvalue: Lvalue) -> tuple[Type | None, IndexExpr | None, Var | None]:
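        # Returns a 3-tuple: the type of the lvalue (if it could be determined), the
        # IndexExpr for an indexed lvalue (these are checked separately), and the Var
        # being defined here (for definitions).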
lvalue_type = None
index_lvalue = None
inferred = None
if self.is_definition(lvalue) and (
not isinstance(lvalue, NameExpr) or isinstance(lvalue.node, Var)
):
if isinstance(lvalue, NameExpr):
assert isinstance(lvalue.node, Var)
inferred = lvalue.node
else:
assert isinstance(lvalue, MemberExpr)
self.expr_checker.accept(lvalue.expr)
inferred = lvalue.def_var
elif isinstance(lvalue, IndexExpr):
index_lvalue = lvalue
elif isinstance(lvalue, MemberExpr):
lvalue_type = self.expr_checker.analyze_ordinary_member_access(lvalue, True)
self.store_type(lvalue, lvalue_type)
elif isinstance(lvalue, NameExpr):
lvalue_type = self.expr_checker.analyze_ref_expr(lvalue, lvalue=True)
self.store_type(lvalue, lvalue_type)
elif isinstance(lvalue, (TupleExpr, ListExpr)):
types = [
self.check_lvalue(sub_expr)[0] or
# This type will be used as a context for further inference of rvalue,
# we put Uninhabited if there is no information available from lvalue.
UninhabitedType()
for sub_expr in lvalue.items
]
lvalue_type = TupleType(types, self.named_type("builtins.tuple"))
elif isinstance(lvalue, StarExpr):
lvalue_type, _, _ = self.check_lvalue(lvalue.expr)
else:
lvalue_type = self.expr_checker.accept(lvalue)
return lvalue_type, index_lvalue, inferred
def is_definition(self, s: Lvalue) -> bool:
if isinstance(s, NameExpr):
if s.is_inferred_def:
return True
            # If the node type is not defined, this must be the first assignment
# that we process => this is a definition, even though the semantic
# analyzer did not recognize this as such. This can arise in code
# that uses isinstance checks, if type checking of the primary
# definition is skipped due to an always False type check.
node = s.node
if isinstance(node, Var):
return node.type is None
elif isinstance(s, MemberExpr):
return s.is_inferred_def
return False
def infer_variable_type(
self, name: Var, lvalue: Lvalue, init_type: Type, context: Context
) -> None:
"""Infer the type of initialized variables from initializer type."""
if isinstance(init_type, DeletedType):
self.msg.deleted_as_rvalue(init_type, context)
elif (
not is_valid_inferred_type(init_type, is_lvalue_final=name.is_final)
and not self.no_partial_types
):
# We cannot use the type of the initialization expression for full type
# inference (it's not specific enough), but we might be able to give
# partial type which will be made more specific later. A partial type
# gets generated in assignment like 'x = []' where item type is not known.
if not self.infer_partial_type(name, lvalue, init_type):
self.msg.need_annotation_for_var(name, context, self.options.python_version)
self.set_inference_error_fallback_type(name, lvalue, init_type)
elif (
isinstance(lvalue, MemberExpr)
and self.inferred_attribute_types is not None
and lvalue.def_var
and lvalue.def_var in self.inferred_attribute_types
and not is_same_type(self.inferred_attribute_types[lvalue.def_var], init_type)
):
# Multiple, inconsistent types inferred for an attribute.
self.msg.need_annotation_for_var(name, context, self.options.python_version)
name.type = AnyType(TypeOfAny.from_error)
else:
# Infer type of the target.
# Make the type more general (strip away function names etc.).
init_type = strip_type(init_type)
self.set_inferred_type(name, lvalue, init_type)
def infer_partial_type(self, name: Var, lvalue: Lvalue, init_type: Type) -> bool:
init_type = get_proper_type(init_type)
if isinstance(init_type, NoneType):
partial_type = PartialType(None, name)
elif isinstance(init_type, Instance):
fullname = init_type.type.fullname
is_ref = isinstance(lvalue, RefExpr)
if (
is_ref
and (
fullname == "builtins.list"
or fullname == "builtins.set"
or fullname == "builtins.dict"
or fullname == "collections.OrderedDict"
)
and all(
isinstance(t, (NoneType, UninhabitedType))
for t in get_proper_types(init_type.args)
)
):
partial_type = PartialType(init_type.type, name)
elif is_ref and fullname == "collections.defaultdict":
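                # For defaultdict, also remember the (erased) value type so that later
                # indexed assignments can be checked against it when completing the
                # partial type.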
arg0 = get_proper_type(init_type.args[0])
arg1 = get_proper_type(init_type.args[1])
if isinstance(
arg0, (NoneType, UninhabitedType)
) and self.is_valid_defaultdict_partial_value_type(arg1):
arg1 = erase_type(arg1)
assert isinstance(arg1, Instance)
partial_type = PartialType(init_type.type, name, arg1)
else:
return False
else:
return False
else:
return False
self.set_inferred_type(name, lvalue, partial_type)
self.partial_types[-1].map[name] = lvalue
return True
def is_valid_defaultdict_partial_value_type(self, t: ProperType) -> bool:
"""Check if t can be used as the basis for a partial defaultdict value type.
Examples:
* t is 'int' --> True
* t is 'list[Never]' --> True
* t is 'dict[...]' --> False (only generic types with a single type
argument supported)
"""
if not isinstance(t, Instance):
return False
if len(t.args) == 0:
return True
if len(t.args) == 1:
arg = get_proper_type(t.args[0])
if self.options.old_type_inference:
# Allow leaked TypeVars for legacy inference logic.
allowed = isinstance(arg, (UninhabitedType, NoneType, TypeVarType))
else:
allowed = isinstance(arg, (UninhabitedType, NoneType))
if allowed:
return True
return False
def set_inferred_type(self, var: Var, lvalue: Lvalue, type: Type) -> None:
"""Store inferred variable type.
Store the type to both the variable node and the expression node that
refers to the variable (lvalue). If var is None, do nothing.
"""
if var and not self.current_node_deferred:
var.type = type
var.is_inferred = True
if var not in self.var_decl_frames:
# Used for the hack to improve optional type inference in conditionals
self.var_decl_frames[var] = {frame.id for frame in self.binder.frames}
if isinstance(lvalue, MemberExpr) and self.inferred_attribute_types is not None:
# Store inferred attribute type so that we can check consistency afterwards.
if lvalue.def_var is not None:
self.inferred_attribute_types[lvalue.def_var] = type
self.store_type(lvalue, type)
def set_inference_error_fallback_type(self, var: Var, lvalue: Lvalue, type: Type) -> None:
"""Store best known type for variable if type inference failed.
        If a program ignores errors on type inference errors, the variable should get some
        inferred type so that it can be used later on in the program. Example:
x = [] # type: ignore
x.append(1) # Should be ok!
We implement this here by giving x a valid type (replacing inferred Never with Any).
"""
fallback = self.inference_error_fallback_type(type)
self.set_inferred_type(var, lvalue, fallback)
def inference_error_fallback_type(self, type: Type) -> Type:
fallback = type.accept(SetNothingToAny())
# Type variables may leak from inference, see https://github.com/python/mypy/issues/5738,
# we therefore need to erase them.
return erase_typevars(fallback)
def simple_rvalue(self, rvalue: Expression) -> bool:
"""Returns True for expressions for which inferred type should not depend on context.
Note that this function can still return False for some expressions where inferred type
does not depend on context. It only exists for performance optimizations.
"""
if isinstance(rvalue, (IntExpr, StrExpr, BytesExpr, FloatExpr, RefExpr)):
return True
if isinstance(rvalue, CallExpr):
if isinstance(rvalue.callee, RefExpr) and isinstance(rvalue.callee.node, FuncBase):
typ = rvalue.callee.node.type
if isinstance(typ, CallableType):
return not typ.variables
elif isinstance(typ, Overloaded):
return not any(item.variables for item in typ.items)
return False
def check_simple_assignment(
self,
lvalue_type: Type | None,
rvalue: Expression,
context: Context,
msg: ErrorMessage = message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT,
lvalue_name: str = "variable",
rvalue_name: str = "expression",
*,
notes: list[str] | None = None,
) -> Type:
if self.is_stub and isinstance(rvalue, EllipsisExpr):
# '...' is always a valid initializer in a stub.
return AnyType(TypeOfAny.special_form)
else:
always_allow_any = lvalue_type is not None and not isinstance(
get_proper_type(lvalue_type), AnyType
)
rvalue_type = self.expr_checker.accept(
rvalue, lvalue_type, always_allow_any=always_allow_any
)
if (
isinstance(get_proper_type(lvalue_type), UnionType)
# Skip literal types, as they have special logic (for better errors).
and not isinstance(get_proper_type(rvalue_type), LiteralType)
and not self.simple_rvalue(rvalue)
):
# Try re-inferring r.h.s. in empty context, and use that if it
# results in a narrower type. We don't do this always because this
# may cause some perf impact, plus we want to partially preserve
# the old behavior. This helps with various practical examples, see
# e.g. testOptionalTypeNarrowedByGenericCall.
with self.msg.filter_errors() as local_errors, self.local_type_map() as type_map:
alt_rvalue_type = self.expr_checker.accept(
rvalue, None, always_allow_any=always_allow_any
)
if (
not local_errors.has_new_errors()
# Skip Any type, since it is special cased in binder.
and not isinstance(get_proper_type(alt_rvalue_type), AnyType)
and is_valid_inferred_type(alt_rvalue_type)
and is_proper_subtype(alt_rvalue_type, rvalue_type)
):
rvalue_type = alt_rvalue_type
self.store_types(type_map)
if isinstance(rvalue_type, DeletedType):
self.msg.deleted_as_rvalue(rvalue_type, context)
if isinstance(lvalue_type, DeletedType):
self.msg.deleted_as_lvalue(lvalue_type, context)
elif lvalue_type:
self.check_subtype(
# Preserve original aliases for error messages when possible.
rvalue_type,
lvalue_type,
context,
msg,
f"{rvalue_name} has type",
f"{lvalue_name} has type",
notes=notes,
)
return rvalue_type
def check_member_assignment(
self, instance_type: Type, attribute_type: Type, rvalue: Expression, context: Context
) -> tuple[Type, Type, bool]:
"""Type member assignment.
This defers to check_simple_assignment, unless the member expression
is a descriptor, in which case this checks descriptor semantics as well.
Return the inferred rvalue_type, inferred lvalue_type, and whether to use the binder
for this assignment.
Note: this method exists here and not in checkmember.py, because we need to take
care about interaction between binder and __set__().
"""
instance_type = get_proper_type(instance_type)
attribute_type = get_proper_type(attribute_type)
# Descriptors don't participate in class-attribute access
if (isinstance(instance_type, FunctionLike) and instance_type.is_type_obj()) or isinstance(
instance_type, TypeType
):
rvalue_type = self.check_simple_assignment(attribute_type, rvalue, context)
return rvalue_type, attribute_type, True
if not isinstance(attribute_type, Instance):
# TODO: support __set__() for union types.
rvalue_type = self.check_simple_assignment(attribute_type, rvalue, context)
return rvalue_type, attribute_type, True
mx = MemberContext(
is_lvalue=False,
is_super=False,
is_operator=False,
original_type=instance_type,
context=context,
self_type=None,
msg=self.msg,
chk=self,
)
get_type = analyze_descriptor_access(attribute_type, mx)
if not attribute_type.type.has_readable_member("__set__"):
# If there is no __set__, we type-check that the assigned value matches
            # the return type of __get__. This doesn't match the Python semantics
            # (which allow you to override the descriptor with any value), but preserves
# the type of accessing the attribute (even after the override).
rvalue_type = self.check_simple_assignment(get_type, rvalue, context)
return rvalue_type, get_type, True
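        # The attribute type defines __set__: the assigned value is checked against the
        # type of the value argument of __set__, which may differ from the return type
        # of __get__.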
dunder_set = attribute_type.type.get_method("__set__")
if dunder_set is None:
self.fail(
message_registry.DESCRIPTOR_SET_NOT_CALLABLE.format(
attribute_type.str_with_options(self.options)
),
context,
)
return AnyType(TypeOfAny.from_error), get_type, False
bound_method = analyze_decorator_or_funcbase_access(
defn=dunder_set,
itype=attribute_type,
info=attribute_type.type,
self_type=attribute_type,
name="__set__",
mx=mx,
)
typ = map_instance_to_supertype(attribute_type, dunder_set.info)
dunder_set_type = expand_type_by_instance(bound_method, typ)
callable_name = self.expr_checker.method_fullname(attribute_type, "__set__")
dunder_set_type = self.expr_checker.transform_callee_type(
callable_name,
dunder_set_type,
[TempNode(instance_type, context=context), rvalue],
[nodes.ARG_POS, nodes.ARG_POS],
context,
object_type=attribute_type,
)
# For non-overloaded setters, the result should be type-checked like a regular assignment.
# Hence, we first only try to infer the type by using the rvalue as type context.
type_context = rvalue
with self.msg.filter_errors():
_, inferred_dunder_set_type = self.expr_checker.check_call(
dunder_set_type,
[TempNode(instance_type, context=context), type_context],
[nodes.ARG_POS, nodes.ARG_POS],
context,
object_type=attribute_type,
callable_name=callable_name,
)
# And now we in fact type check the call, to show errors related to wrong arguments
# count, etc., replacing the type context for non-overloaded setters only.
inferred_dunder_set_type = get_proper_type(inferred_dunder_set_type)
if isinstance(inferred_dunder_set_type, CallableType):
type_context = TempNode(AnyType(TypeOfAny.special_form), context=context)
self.expr_checker.check_call(
dunder_set_type,
[TempNode(instance_type, context=context), type_context],
[nodes.ARG_POS, nodes.ARG_POS],
context,
object_type=attribute_type,
callable_name=callable_name,
)
# In the following cases, a message already will have been recorded in check_call.
if (not isinstance(inferred_dunder_set_type, CallableType)) or (
len(inferred_dunder_set_type.arg_types) < 2
):
return AnyType(TypeOfAny.from_error), get_type, False
set_type = inferred_dunder_set_type.arg_types[1]
# Special case: if the rvalue_type is a subtype of both '__get__' and '__set__' types,
# and '__get__' type is narrower than '__set__', then we invoke the binder to narrow type
# by this assignment. Technically, this is not safe, but in practice this is
# what a user expects.
rvalue_type = self.check_simple_assignment(set_type, rvalue, context)
infer = is_subtype(rvalue_type, get_type) and is_subtype(get_type, set_type)
return rvalue_type if infer else set_type, get_type, infer
def check_indexed_assignment(
self, lvalue: IndexExpr, rvalue: Expression, context: Context
) -> None:
"""Type check indexed assignment base[index] = rvalue.
The lvalue argument is the base[index] expression.
"""
self.try_infer_partial_type_from_indexed_assignment(lvalue, rvalue)
basetype = get_proper_type(self.expr_checker.accept(lvalue.base))
method_type = self.expr_checker.analyze_external_member_access(
"__setitem__", basetype, lvalue
)
lvalue.method_type = method_type
res_type, _ = self.expr_checker.check_method_call(
"__setitem__",
basetype,
method_type,
[lvalue.index, rvalue],
[nodes.ARG_POS, nodes.ARG_POS],
context,
)
res_type = get_proper_type(res_type)
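        # If __setitem__ is declared to return Never (e.g. it unconditionally raises),
        # the code following this assignment is unreachable.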
if isinstance(res_type, UninhabitedType) and not res_type.ambiguous:
self.binder.unreachable()
def try_infer_partial_type_from_indexed_assignment(
self, lvalue: IndexExpr, rvalue: Expression
) -> None:
# TODO: Should we share some of this with try_infer_partial_type?
var = None
if isinstance(lvalue.base, RefExpr) and isinstance(lvalue.base.node, Var):
var = lvalue.base.node
elif isinstance(lvalue.base, MemberExpr):
var = self.expr_checker.get_partial_self_var(lvalue.base)
if isinstance(var, Var):
if isinstance(var.type, PartialType):
type_type = var.type.type
if type_type is None:
return # The partial type is None.
partial_types = self.find_partial_types(var)
if partial_types is None:
return
typename = type_type.fullname
if (
typename == "builtins.dict"
or typename == "collections.OrderedDict"
or typename == "collections.defaultdict"
):
# TODO: Don't infer things twice.
key_type = self.expr_checker.accept(lvalue.index)
value_type = self.expr_checker.accept(rvalue)
if (
is_valid_inferred_type(key_type)
and is_valid_inferred_type(value_type)
and not self.current_node_deferred
and not (
typename == "collections.defaultdict"
and var.type.value_type is not None
and not is_equivalent(value_type, var.type.value_type)
)
):
var.type = self.named_generic_type(typename, [key_type, value_type])
del partial_types[var]
def type_requires_usage(self, typ: Type) -> tuple[str, ErrorCode] | None:
"""Some types require usage in all cases. The classic example is
an unused coroutine.
In the case that it does require usage, returns a note to attach
to the error message.
"""
proper_type = get_proper_type(typ)
if isinstance(proper_type, Instance):
# We use different error codes for generic awaitable vs coroutine.
# Coroutines are on by default, whereas generic awaitables are not.
if proper_type.type.fullname == "typing.Coroutine":
return ("Are you missing an await?", UNUSED_COROUTINE)
if proper_type.type.get("__await__") is not None:
return ("Are you missing an await?", UNUSED_AWAITABLE)
return None
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
expr_type = self.expr_checker.accept(s.expr, allow_none_return=True, always_allow_any=True)
error_note_and_code = self.type_requires_usage(expr_type)
if error_note_and_code:
error_note, code = error_note_and_code
self.fail(
message_registry.TYPE_MUST_BE_USED.format(format_type(expr_type, self.options)),
s,
code=code,
)
self.note(error_note, s, code=code)
def visit_return_stmt(self, s: ReturnStmt) -> None:
"""Type check a return statement."""
self.check_return_stmt(s)
self.binder.unreachable()
def check_return_stmt(self, s: ReturnStmt) -> None:
defn = self.scope.top_function()
if defn is not None:
if defn.is_generator:
return_type = self.get_generator_return_type(
self.return_types[-1], defn.is_coroutine
)
elif defn.is_coroutine:
return_type = self.get_coroutine_return_type(self.return_types[-1])
else:
return_type = self.return_types[-1]
return_type = get_proper_type(return_type)
is_lambda = isinstance(self.scope.top_function(), LambdaExpr)
if isinstance(return_type, UninhabitedType):
# Avoid extra error messages for failed inference in lambdas
if not is_lambda and not return_type.ambiguous:
self.fail(message_registry.NO_RETURN_EXPECTED, s)
return
if s.expr:
declared_none_return = isinstance(return_type, NoneType)
declared_any_return = isinstance(return_type, AnyType)
# This controls whether or not we allow a function call that
# returns None as the expression of this return statement.
# E.g. `return f()` for some `f` that returns None. We allow
# this only if we're in a lambda or in a function that returns
# `None` or `Any`.
allow_none_func_call = is_lambda or declared_none_return or declared_any_return
# Return with a value.
typ = get_proper_type(
self.expr_checker.accept(
s.expr, return_type, allow_none_return=allow_none_func_call
)
)
if defn.is_async_generator:
self.fail(message_registry.RETURN_IN_ASYNC_GENERATOR, s)
return
# Returning a value of type Any is always fine.
if isinstance(typ, AnyType):
# (Unless you asked to be warned in that case, and the
# function is not declared to return Any)
if (
self.options.warn_return_any
and not self.current_node_deferred
and not is_proper_subtype(AnyType(TypeOfAny.special_form), return_type)
and not (
defn.name in BINARY_MAGIC_METHODS
and is_literal_not_implemented(s.expr)
)
and not (
isinstance(return_type, Instance)
and return_type.type.fullname == "builtins.object"
)
and not is_lambda
):
self.msg.incorrectly_returning_any(return_type, s)
return
# Disallow return expressions in functions declared to return
# None, subject to two exceptions below.
if declared_none_return:
# Lambdas are allowed to have None returns.
# Functions returning a value of type None are allowed to have a None return.
if is_lambda or isinstance(typ, NoneType):
return
self.fail(message_registry.NO_RETURN_VALUE_EXPECTED, s)
else:
self.check_subtype(
subtype_label="got",
subtype=typ,
supertype_label="expected",
supertype=return_type,
context=s.expr,
outer_context=s,
msg=message_registry.INCOMPATIBLE_RETURN_VALUE_TYPE,
)
else:
# Empty returns are valid in Generators with Any typed returns, but not in
# coroutines.
if (
defn.is_generator
and not defn.is_coroutine
and isinstance(return_type, AnyType)
):
return
if isinstance(return_type, (NoneType, AnyType)):
return
if self.in_checked_function():
self.fail(message_registry.RETURN_VALUE_EXPECTED, s)
def visit_if_stmt(self, s: IfStmt) -> None:
"""Type check an if statement."""
# This frame records the knowledge from previous if/elif clauses not being taken.
# Fall-through to the original frame is handled explicitly in each block.
with self.binder.frame_context(can_skip=False, conditional_frame=True, fall_through=0):
for e, b in zip(s.expr, s.body):
t = get_proper_type(self.expr_checker.accept(e))
if isinstance(t, DeletedType):
self.msg.deleted_as_rvalue(t, s)
if_map, else_map = self.find_isinstance_check(e)
# XXX Issue a warning if condition is always False?
with self.binder.frame_context(can_skip=True, fall_through=2):
self.push_type_map(if_map)
self.accept(b)
# XXX Issue a warning if condition is always True?
self.push_type_map(else_map)
with self.binder.frame_context(can_skip=False, fall_through=2):
if s.else_body:
self.accept(s.else_body)
def visit_while_stmt(self, s: WhileStmt) -> None:
"""Type check a while statement."""
if_stmt = IfStmt([s.expr], [s.body], None)
if_stmt.set_line(s)
self.accept_loop(if_stmt, s.else_body, exit_condition=s.expr)
def visit_operator_assignment_stmt(self, s: OperatorAssignmentStmt) -> None:
"""Type check an operator assignment statement, e.g. x += 1."""
self.try_infer_partial_generic_type_from_assignment(s.lvalue, s.rvalue, s.op)
if isinstance(s.lvalue, MemberExpr):
# Special case, some additional errors may be given for
# assignments to read-only or final attributes.
lvalue_type = self.expr_checker.visit_member_expr(s.lvalue, True)
else:
lvalue_type = self.expr_checker.accept(s.lvalue)
inplace, method = infer_operator_assignment_method(lvalue_type, s.op)
if inplace:
# There is __ifoo__, treat as x = x.__ifoo__(y)
rvalue_type, method_type = self.expr_checker.check_op(method, lvalue_type, s.rvalue, s)
if not is_subtype(rvalue_type, lvalue_type):
self.msg.incompatible_operator_assignment(s.op, s)
else:
# There is no __ifoo__, treat as x = x <foo> y
expr = OpExpr(s.op, s.lvalue, s.rvalue)
expr.set_line(s)
self.check_assignment(
lvalue=s.lvalue, rvalue=expr, infer_lvalue_type=True, new_syntax=False
)
self.check_final(s)
def visit_assert_stmt(self, s: AssertStmt) -> None:
self.expr_checker.accept(s.expr)
if isinstance(s.expr, TupleExpr) and len(s.expr.items) > 0:
self.fail(message_registry.MALFORMED_ASSERT, s)
# If this is asserting some isinstance check, bind that type in the following code
true_map, else_map = self.find_isinstance_check(s.expr)
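        # The assert message is only evaluated when the assertion fails, so type check it
        # under the negated (else) condition.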
if s.msg is not None:
self.expr_checker.analyze_cond_branch(else_map, s.msg, None)
self.push_type_map(true_map)
def visit_raise_stmt(self, s: RaiseStmt) -> None:
"""Type check a raise statement."""
if s.expr:
self.type_check_raise(s.expr, s)
if s.from_expr:
self.type_check_raise(s.from_expr, s, optional=True)
self.binder.unreachable()
def type_check_raise(self, e: Expression, s: RaiseStmt, optional: bool = False) -> None:
typ = get_proper_type(self.expr_checker.accept(e))
if isinstance(typ, DeletedType):
self.msg.deleted_as_rvalue(typ, e)
return
exc_type = self.named_type("builtins.BaseException")
expected_type_items = [exc_type, TypeType(exc_type)]
if optional:
# This is used for `x` part in a case like `raise e from x`,
# where we allow `raise e from None`.
expected_type_items.append(NoneType())
self.check_subtype(
typ, UnionType.make_union(expected_type_items), s, message_registry.INVALID_EXCEPTION
)
if isinstance(typ, FunctionLike):
# https://github.com/python/mypy/issues/11089
self.expr_checker.check_call(typ, [], [], e)
def visit_try_stmt(self, s: TryStmt) -> None:
"""Type check a try statement."""
# Our enclosing frame will get the result if the try/except falls through.
# This one gets all possible states after the try block exited abnormally
# (by exception, return, break, etc.)
with self.binder.frame_context(can_skip=False, fall_through=0):
# Not only might the body of the try statement exit
# abnormally, but so might an exception handler or else
# clause. The finally clause runs in *all* cases, so we
# need an outer try frame to catch all intermediate states
# in case an exception is raised during an except or else
# clause. As an optimization, only create the outer try
# frame when there actually is a finally clause.
self.visit_try_without_finally(s, try_frame=bool(s.finally_body))
if s.finally_body:
# First we check finally_body is type safe on all abnormal exit paths
self.accept(s.finally_body)
if s.finally_body:
# Then we try again for the more restricted set of options
# that can fall through. (Why do we need to check the
# finally clause twice? Depending on whether the finally
# clause was reached by the try clause falling off the end
# or exiting abnormally, after completing the finally clause
# either flow will continue to after the entire try statement
# or the exception/return/etc. will be processed and control
# flow will escape. We need to check that the finally clause
# type checks in both contexts, but only the resulting types
# from the latter context affect the type state in the code
# that follows the try statement.)
if not self.binder.is_unreachable():
self.accept(s.finally_body)
def visit_try_without_finally(self, s: TryStmt, try_frame: bool) -> None:
"""Type check a try statement, ignoring the finally block.
On entry, the top frame should receive all flow that exits the
try block abnormally (i.e., such that the else block does not
execute), and its parent should receive all flow that exits
the try block normally.
"""
# This frame will run the else block if the try fell through.
# In that case, control flow continues to the parent of what
# was the top frame on entry.
with self.binder.frame_context(can_skip=False, fall_through=2, try_frame=try_frame):
# This frame receives exit via exception, and runs exception handlers
with self.binder.frame_context(can_skip=False, conditional_frame=True, fall_through=2):
# Finally, the body of the try statement
with self.binder.frame_context(can_skip=False, fall_through=2, try_frame=True):
self.accept(s.body)
for i in range(len(s.handlers)):
with self.binder.frame_context(can_skip=True, fall_through=4):
typ = s.types[i]
if typ:
t = self.check_except_handler_test(typ, s.is_star)
var = s.vars[i]
if var:
# To support local variables, we make this a definition line,
# causing assignment to set the variable's type.
var.is_inferred_def = True
self.check_assignment(var, self.temp_node(t, var))
self.accept(s.handlers[i])
var = s.vars[i]
if var:
# Exception variables are deleted.
# Unfortunately, this doesn't let us detect usage before the
# try/except block.
source = var.name
if isinstance(var.node, Var):
var.node.type = DeletedType(source=source)
self.binder.cleanse(var)
if s.else_body:
self.accept(s.else_body)
def check_except_handler_test(self, n: Expression, is_star: bool) -> Type:
"""Type check an exception handler test clause."""
typ = self.expr_checker.accept(n)
all_types: list[Type] = []
test_types = self.get_types_from_except_handler(typ, n)
for ttype in get_proper_types(test_types):
if isinstance(ttype, AnyType):
all_types.append(ttype)
continue
if isinstance(ttype, FunctionLike):
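                # A function-like type here should be an exception class (a type object);
                # the exception instance type is its return type.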
item = ttype.items[0]
if not item.is_type_obj():
self.fail(message_registry.INVALID_EXCEPTION_TYPE, n)
return self.default_exception_type(is_star)
exc_type = erase_typevars(item.ret_type)
elif isinstance(ttype, TypeType):
exc_type = ttype.item
else:
self.fail(message_registry.INVALID_EXCEPTION_TYPE, n)
return self.default_exception_type(is_star)
if not is_subtype(exc_type, self.named_type("builtins.BaseException")):
self.fail(message_registry.INVALID_EXCEPTION_TYPE, n)
return self.default_exception_type(is_star)
all_types.append(exc_type)
if is_star:
new_all_types: list[Type] = []
for typ in all_types:
if is_proper_subtype(typ, self.named_type("builtins.BaseExceptionGroup")):
self.fail(message_registry.INVALID_EXCEPTION_GROUP, n)
new_all_types.append(AnyType(TypeOfAny.from_error))
else:
new_all_types.append(typ)
return self.wrap_exception_group(new_all_types)
return make_simplified_union(all_types)
def default_exception_type(self, is_star: bool) -> Type:
"""Exception type to return in case of a previous type error."""
any_type = AnyType(TypeOfAny.from_error)
if is_star:
return self.named_generic_type("builtins.ExceptionGroup", [any_type])
return any_type
def wrap_exception_group(self, types: Sequence[Type]) -> Type:
"""Transform except* variable type into an appropriate exception group."""
arg = make_simplified_union(types)
if is_subtype(arg, self.named_type("builtins.Exception")):
base = "builtins.ExceptionGroup"
else:
base = "builtins.BaseExceptionGroup"
return self.named_generic_type(base, [arg])
def get_types_from_except_handler(self, typ: Type, n: Expression) -> list[Type]:
"""Helper for check_except_handler_test to retrieve handler types."""
typ = get_proper_type(typ)
if isinstance(typ, TupleType):
return typ.items
elif isinstance(typ, UnionType):
return [
union_typ
for item in typ.relevant_items()
for union_typ in self.get_types_from_except_handler(item, n)
]
elif is_named_instance(typ, "builtins.tuple"):
# variadic tuple
return [typ.args[0]]
else:
return [typ]
def visit_for_stmt(self, s: ForStmt) -> None:
"""Type check a for statement."""
if s.is_async:
iterator_type, item_type = self.analyze_async_iterable_item_type(s.expr)
else:
iterator_type, item_type = self.analyze_iterable_item_type(s.expr)
s.inferred_item_type = item_type
s.inferred_iterator_type = iterator_type
self.analyze_index_variables(s.index, item_type, s.index_type is None, s)
self.accept_loop(s.body, s.else_body)
def analyze_async_iterable_item_type(self, expr: Expression) -> tuple[Type, Type]:
"""Analyse async iterable expression and return iterator and iterator item types."""
echk = self.expr_checker
iterable = echk.accept(expr)
iterator = echk.check_method_call_by_name("__aiter__", iterable, [], [], expr)[0]
awaitable = echk.check_method_call_by_name("__anext__", iterator, [], [], expr)[0]
item_type = echk.check_awaitable_expr(
awaitable, expr, message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_FOR
)
return iterator, item_type
def analyze_iterable_item_type(self, expr: Expression) -> tuple[Type, Type]:
"""Analyse iterable expression and return iterator and iterator item types."""
iterator, iterable = self.analyze_iterable_item_type_without_expression(
self.expr_checker.accept(expr), context=expr
)
int_type = self.analyze_range_native_int_type(expr)
if int_type:
return iterator, int_type
return iterator, iterable
def analyze_iterable_item_type_without_expression(
self, type: Type, context: Context
) -> tuple[Type, Type]:
"""Analyse iterable type and return iterator and iterator item types."""
echk = self.expr_checker
iterable: Type
iterable = get_proper_type(type)
iterator = echk.check_method_call_by_name("__iter__", iterable, [], [], context)[0]
if (
isinstance(iterable, TupleType)
and iterable.partial_fallback.type.fullname == "builtins.tuple"
):
return iterator, tuple_fallback(iterable).args[0]
else:
# Non-tuple iterable.
iterable = echk.check_method_call_by_name("__next__", iterator, [], [], context)[0]
return iterator, iterable
def analyze_range_native_int_type(self, expr: Expression) -> Type | None:
"""Try to infer native int item type from arguments to range(...).
For example, return i64 if the expression is "range(0, i64(n))".
Return None if unsuccessful.
"""
if (
isinstance(expr, CallExpr)
and isinstance(expr.callee, RefExpr)
and expr.callee.fullname == "builtins.range"
and 1 <= len(expr.args) <= 3
and all(kind == ARG_POS for kind in expr.arg_kinds)
):
native_int: Type | None = None
ok = True
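            # If any argument has a native int type, use it as the item type, as long as
            # all native-int arguments agree; plain int arguments do not block this.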
for arg in expr.args:
argt = get_proper_type(self.lookup_type(arg))
if isinstance(argt, Instance) and argt.type.fullname in MYPYC_NATIVE_INT_NAMES:
if native_int is None:
native_int = argt
elif argt != native_int:
ok = False
if ok and native_int:
return native_int
return None
def analyze_container_item_type(self, typ: Type) -> Type | None:
"""Check if a type is a nominal container of a union of such.
Return the corresponding container item type.
"""
typ = get_proper_type(typ)
if isinstance(typ, UnionType):
types: list[Type] = []
for item in typ.items:
c_type = self.analyze_container_item_type(item)
if c_type:
types.append(c_type)
return UnionType.make_union(types)
if isinstance(typ, Instance) and typ.type.has_base("typing.Container"):
supertype = self.named_type("typing.Container").type
super_instance = map_instance_to_supertype(typ, supertype)
assert len(super_instance.args) == 1
return super_instance.args[0]
if isinstance(typ, TupleType):
return self.analyze_container_item_type(tuple_fallback(typ))
return None
def analyze_index_variables(
self, index: Expression, item_type: Type, infer_lvalue_type: bool, context: Context
) -> None:
"""Type check or infer for loop or list comprehension index vars."""
self.check_assignment(index, self.temp_node(item_type, context), infer_lvalue_type)
def visit_del_stmt(self, s: DelStmt) -> None:
if isinstance(s.expr, IndexExpr):
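            # 'del base[index]' is checked as a call to base.__delitem__(index).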
e = s.expr
m = MemberExpr(e.base, "__delitem__")
m.line = s.line
m.column = s.column
c = CallExpr(m, [e.index], [nodes.ARG_POS], [None])
c.line = s.line
c.column = s.column
self.expr_checker.accept(c, allow_none_return=True)
else:
s.expr.accept(self.expr_checker)
for elt in flatten(s.expr):
if isinstance(elt, NameExpr):
self.binder.assign_type(
elt, DeletedType(source=elt.name), get_declaration(elt), False
)
def visit_decorator(self, e: Decorator) -> None:
for d in e.decorators:
if isinstance(d, RefExpr):
if d.fullname == "typing.no_type_check":
e.var.type = AnyType(TypeOfAny.special_form)
e.var.is_ready = True
return
self.visit_decorator_inner(e)
def visit_decorator_inner(self, e: Decorator, allow_empty: bool = False) -> None:
if self.recurse_into_functions:
with self.tscope.function_scope(e.func):
self.check_func_item(e.func, name=e.func.name, allow_empty=allow_empty)
# Process decorators from the inside out to determine decorated signature, which
# may be different from the declared signature.
sig: Type = self.function_type(e.func)
for d in reversed(e.decorators):
if refers_to_fullname(d, OVERLOAD_NAMES):
if not allow_empty:
self.fail(message_registry.MULTIPLE_OVERLOADS_REQUIRED, e)
continue
dec = self.expr_checker.accept(d)
temp = self.temp_node(sig, context=e)
fullname = None
if isinstance(d, RefExpr):
fullname = d.fullname or None
            # If this is an expression like @b.a where b is an object, get the type of b
            # so we can pass it to the method hook in the plugins.
object_type: Type | None = None
if fullname is None and isinstance(d, MemberExpr) and self.has_type(d.expr):
object_type = self.lookup_type(d.expr)
fullname = self.expr_checker.method_fullname(object_type, d.name)
self.check_for_untyped_decorator(e.func, dec, d)
sig, t2 = self.expr_checker.check_call(
dec, [temp], [nodes.ARG_POS], e, callable_name=fullname, object_type=object_type
)
self.check_untyped_after_decorator(sig, e.func)
sig = set_callable_name(sig, e.func)
e.var.type = sig
e.var.is_ready = True
if e.func.is_property:
if isinstance(sig, CallableType):
if len([k for k in sig.arg_kinds if k.is_required()]) > 1:
self.msg.fail("Too many arguments for property", e)
self.check_incompatible_property_override(e)
# For overloaded functions we already checked override for overload as a whole.
if allow_empty:
return
if e.func.info and not e.func.is_dynamic() and not e.is_overload:
found_method_base_classes = self.check_method_override(e)
if (
e.func.is_explicit_override
and not found_method_base_classes
and found_method_base_classes is not None
):
self.msg.no_overridable_method(e.func.name, e.func)
self.check_explicit_override_decorator(e.func, found_method_base_classes)
if e.func.info and e.func.name in ("__init__", "__new__"):
if e.type and not isinstance(get_proper_type(e.type), (FunctionLike, AnyType)):
self.fail(message_registry.BAD_CONSTRUCTOR_TYPE, e)
def check_for_untyped_decorator(
self, func: FuncDef, dec_type: Type, dec_expr: Expression
) -> None:
if (
self.options.disallow_untyped_decorators
and is_typed_callable(func.type)
and is_untyped_decorator(dec_type)
):
self.msg.typed_function_untyped_decorator(func.name, dec_expr)
def check_incompatible_property_override(self, e: Decorator) -> None:
if not e.var.is_settable_property and e.func.info:
name = e.func.name
for base in e.func.info.mro[1:]:
base_attr = base.names.get(name)
if not base_attr:
continue
if (
isinstance(base_attr.node, OverloadedFuncDef)
and base_attr.node.is_property
and cast(Decorator, base_attr.node.items[0]).var.is_settable_property
):
self.fail(message_registry.READ_ONLY_PROPERTY_OVERRIDES_READ_WRITE, e)
def visit_with_stmt(self, s: WithStmt) -> None:
exceptions_maybe_suppressed = False
for expr, target in zip(s.expr, s.target):
if s.is_async:
exit_ret_type = self.check_async_with_item(expr, target, s.unanalyzed_type is None)
else:
exit_ret_type = self.check_with_item(expr, target, s.unanalyzed_type is None)
# Based on the return type, determine if this context manager 'swallows'
# exceptions or not. We determine this using a heuristic based on the
# return type of the __exit__ method -- see the discussion in
# https://github.com/python/mypy/issues/7214 and the section about context managers
# in https://github.com/python/typeshed/blob/main/CONTRIBUTING.md#conventions
# for more details.
exit_ret_type = get_proper_type(exit_ret_type)
if is_literal_type(exit_ret_type, "builtins.bool", False):
continue
if is_literal_type(exit_ret_type, "builtins.bool", True) or (
isinstance(exit_ret_type, Instance)
and exit_ret_type.type.fullname == "builtins.bool"
and state.strict_optional
):
# Note: if strict-optional is disabled, this bool instance
# could actually be an Optional[bool].
exceptions_maybe_suppressed = True
if exceptions_maybe_suppressed:
# Treat this 'with' block in the same way we'd treat a 'try: BODY; except: pass'
# block. This means control flow can continue after the 'with' even if the 'with'
# block immediately returns.
with self.binder.frame_context(can_skip=True, try_frame=True):
self.accept(s.body)
else:
self.accept(s.body)
def check_untyped_after_decorator(self, typ: Type, func: FuncDef) -> None:
if not self.options.disallow_any_decorated or self.is_stub:
return
if mypy.checkexpr.has_any_type(typ):
self.msg.untyped_decorated_function(typ, func)
def check_async_with_item(
self, expr: Expression, target: Expression | None, infer_lvalue_type: bool
) -> Type:
echk = self.expr_checker
ctx = echk.accept(expr)
obj = echk.check_method_call_by_name("__aenter__", ctx, [], [], expr)[0]
obj = echk.check_awaitable_expr(
obj, expr, message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_WITH_AENTER
)
if target:
self.check_assignment(target, self.temp_node(obj, expr), infer_lvalue_type)
arg = self.temp_node(AnyType(TypeOfAny.special_form), expr)
res, _ = echk.check_method_call_by_name(
"__aexit__", ctx, [arg] * 3, [nodes.ARG_POS] * 3, expr
)
return echk.check_awaitable_expr(
res, expr, message_registry.INCOMPATIBLE_TYPES_IN_ASYNC_WITH_AEXIT
)
def check_with_item(
self, expr: Expression, target: Expression | None, infer_lvalue_type: bool
) -> Type:
echk = self.expr_checker
ctx = echk.accept(expr)
obj = echk.check_method_call_by_name("__enter__", ctx, [], [], expr)[0]
if target:
self.check_assignment(target, self.temp_node(obj, expr), infer_lvalue_type)
arg = self.temp_node(AnyType(TypeOfAny.special_form), expr)
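        # __exit__ is called with three Any-typed dummy arguments standing in for
        # (exc_type, exc_value, traceback).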
res, _ = echk.check_method_call_by_name(
"__exit__", ctx, [arg] * 3, [nodes.ARG_POS] * 3, expr
)
return res
def visit_break_stmt(self, s: BreakStmt) -> None:
self.binder.handle_break()
def visit_continue_stmt(self, s: ContinueStmt) -> None:
self.binder.handle_continue()
return
def visit_match_stmt(self, s: MatchStmt) -> None:
named_subject: Expression
if isinstance(s.subject, CallExpr):
# Create a dummy subject expression to handle cases where a match statement's subject
            # is not a literal value. This lets us correctly narrow types and check exhaustivity.
            # This is a hack!
id = s.subject.callee.fullname if isinstance(s.subject.callee, RefExpr) else ""
name = "dummy-match-" + id
v = Var(name)
named_subject = NameExpr(name)
named_subject.node = v
else:
named_subject = s.subject
with self.binder.frame_context(can_skip=False, fall_through=0):
subject_type = get_proper_type(self.expr_checker.accept(s.subject))
if isinstance(subject_type, DeletedType):
self.msg.deleted_as_rvalue(subject_type, s)
# We infer types of patterns twice. The first pass is used
# to infer the types of capture variables. The type of a
# capture variable may depend on multiple patterns (it
# will be a union of all capture types). This pass ignores
# guard expressions.
pattern_types = [self.pattern_checker.accept(p, subject_type) for p in s.patterns]
type_maps: list[TypeMap] = [t.captures for t in pattern_types]
inferred_types = self.infer_variable_types_from_type_maps(type_maps)
# The second pass narrows down the types and type checks bodies.
for p, g, b in zip(s.patterns, s.guards, s.bodies):
current_subject_type = self.expr_checker.narrow_type_from_binder(
named_subject, subject_type
)
pattern_type = self.pattern_checker.accept(p, current_subject_type)
with self.binder.frame_context(can_skip=True, fall_through=2):
if b.is_unreachable or isinstance(
get_proper_type(pattern_type.type), UninhabitedType
):
self.push_type_map(None)
else_map: TypeMap = {}
else:
pattern_map, else_map = conditional_types_to_typemaps(
named_subject, pattern_type.type, pattern_type.rest_type
)
self.remove_capture_conflicts(pattern_type.captures, inferred_types)
self.push_type_map(pattern_map)
if pattern_map:
for expr, typ in pattern_map.items():
self.push_type_map(self._get_recursive_sub_patterns_map(expr, typ))
self.push_type_map(pattern_type.captures)
if g is not None:
with self.binder.frame_context(can_skip=False, fall_through=3):
gt = get_proper_type(self.expr_checker.accept(g))
if isinstance(gt, DeletedType):
self.msg.deleted_as_rvalue(gt, s)
guard_map, guard_else_map = self.find_isinstance_check(g)
else_map = or_conditional_maps(else_map, guard_else_map)
# If the guard narrowed the subject, copy the narrowed types over
if isinstance(p, AsPattern):
case_target = p.pattern or p.name
if isinstance(case_target, NameExpr):
for type_map in (guard_map, else_map):
if not type_map:
continue
for expr in list(type_map):
if not (
isinstance(expr, NameExpr)
and expr.fullname == case_target.fullname
):
continue
type_map[named_subject] = type_map[expr]
self.push_type_map(guard_map)
self.accept(b)
else:
self.accept(b)
self.push_type_map(else_map)
# This is needed due to a quirk in frame_context. Without it types will stay narrowed
# after the match.
with self.binder.frame_context(can_skip=False, fall_through=2):
pass
def _get_recursive_sub_patterns_map(
self, expr: Expression, typ: Type
) -> dict[Expression, Type]:
sub_patterns_map: dict[Expression, Type] = {}
typ_ = get_proper_type(typ)
if isinstance(expr, TupleExpr) and isinstance(typ_, TupleType):
# When matching a tuple expression with a sequence pattern, narrow individual tuple items
assert len(expr.items) == len(typ_.items)
for item_expr, item_typ in zip(expr.items, typ_.items):
sub_patterns_map[item_expr] = item_typ
sub_patterns_map.update(self._get_recursive_sub_patterns_map(item_expr, item_typ))
return sub_patterns_map
def infer_variable_types_from_type_maps(self, type_maps: list[TypeMap]) -> dict[Var, Type]:
all_captures: dict[Var, list[tuple[NameExpr, Type]]] = defaultdict(list)
for tm in type_maps:
if tm is not None:
for expr, typ in tm.items():
if isinstance(expr, NameExpr):
node = expr.node
assert isinstance(node, Var)
all_captures[node].append((expr, typ))
inferred_types: dict[Var, Type] = {}
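        # A capture variable's type is the union of its captured types across all patterns,
        # unless the variable already has a type, in which case each capture must be
        # compatible with it.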
for var, captures in all_captures.items():
already_exists = False
types: list[Type] = []
for expr, typ in captures:
types.append(typ)
previous_type, _, _ = self.check_lvalue(expr)
if previous_type is not None:
already_exists = True
if self.check_subtype(
typ,
previous_type,
expr,
msg=message_registry.INCOMPATIBLE_TYPES_IN_CAPTURE,
subtype_label="pattern captures type",
supertype_label="variable has type",
):
inferred_types[var] = previous_type
if not already_exists:
new_type = UnionType.make_union(types)
# Infer the union type at the first occurrence
first_occurrence, _ = captures[0]
inferred_types[var] = new_type
self.infer_variable_type(var, first_occurrence, new_type, first_occurrence)
return inferred_types
def remove_capture_conflicts(self, type_map: TypeMap, inferred_types: dict[Var, Type]) -> None:
if type_map:
for expr, typ in list(type_map.items()):
if isinstance(expr, NameExpr):
node = expr.node
assert isinstance(node, Var)
if node not in inferred_types or not is_subtype(typ, inferred_types[node]):
del type_map[expr]
def visit_type_alias_stmt(self, o: TypeAliasStmt) -> None:
with self.msg.filter_errors():
self.expr_checker.accept(o.value)
def make_fake_typeinfo(
self,
curr_module_fullname: str,
class_gen_name: str,
class_short_name: str,
bases: list[Instance],
) -> tuple[ClassDef, TypeInfo]:
# Build the fake ClassDef and TypeInfo together.
# The ClassDef is full of lies and doesn't actually contain a body.
# Use format_bare to generate a nice name for error messages.
# We skip fully filling out a handful of TypeInfo fields because they
# should be irrelevant for a generated type like this:
# is_protocol, protocol_members, is_abstract
cdef = ClassDef(class_short_name, Block([]))
cdef.fullname = curr_module_fullname + "." + class_gen_name
info = TypeInfo(SymbolTable(), cdef, curr_module_fullname)
cdef.info = info
info.bases = bases
calculate_mro(info)
info.metaclass_type = info.calculate_metaclass_type()
return cdef, info
def intersect_instances(
self, instances: tuple[Instance, Instance], errors: list[tuple[str, str]]
) -> Instance | None:
"""Try creating an ad-hoc intersection of the given instances.
Note that this function does *not* try and create a full-fledged
intersection type. Instead, it returns an instance of a new ad-hoc
subclass of the given instances.
        This is mainly useful when you need a way of representing some
        theoretical subclass of the instances the user may be trying to use;
        the generated intersection can serve as a placeholder.
This function will create a fresh subclass every time you call it,
even if you pass in the exact same arguments. So this means calling
        `self.intersect_instances((inst_1, inst_2), errors)` twice will result
in instances of two distinct subclasses of inst_1 and inst_2.
This is by design: we want each ad-hoc intersection to be unique since
        they're supposed to represent some other unknown subclass.
Returns None if creating the subclass is impossible (e.g. due to
MRO errors or incompatible signatures). If we do successfully create
a subclass, its TypeInfo will automatically be added to the global scope.
"""
curr_module = self.scope.stack[0]
assert isinstance(curr_module, MypyFile)
# First, retry narrowing while allowing promotions (they are disabled by default
# for isinstance() checks, etc). This way we will still type-check branches like
# x: complex = 1
# if isinstance(x, int):
# ...
left, right = instances
if is_proper_subtype(left, right, ignore_promotions=False):
return left
if is_proper_subtype(right, left, ignore_promotions=False):
return right
def _get_base_classes(instances_: tuple[Instance, Instance]) -> list[Instance]:
base_classes_ = []
for inst in instances_:
if inst.type.is_intersection:
expanded = inst.type.bases
else:
expanded = [inst]
for expanded_inst in expanded:
base_classes_.append(expanded_inst)
return base_classes_
def _make_fake_typeinfo_and_full_name(
base_classes_: list[Instance], curr_module_: MypyFile
) -> tuple[TypeInfo, str]:
names_list = pretty_seq([x.type.name for x in base_classes_], "and")
short_name = f"<subclass of {names_list}>"
full_name_ = gen_unique_name(short_name, curr_module_.names)
cdef, info_ = self.make_fake_typeinfo(
curr_module_.fullname, full_name_, short_name, base_classes_
)
return info_, full_name_
base_classes = _get_base_classes(instances)
# We use the pretty_names_list for error messages but can't
# use it for the real name that goes into the symbol table
# because it can have dots in it.
pretty_names_list = pretty_seq(
format_type_distinctly(*base_classes, options=self.options, bare=True), "and"
)
new_errors = []
for base in base_classes:
if base.type.is_final:
new_errors.append((pretty_names_list, f'"{base.type.name}" is final'))
if new_errors:
errors.extend(new_errors)
return None
try:
info, full_name = _make_fake_typeinfo_and_full_name(base_classes, curr_module)
with self.msg.filter_errors() as local_errors:
self.check_multiple_inheritance(info)
if local_errors.has_new_errors():
# "class A(B, C)" unsafe, now check "class A(C, B)":
base_classes = _get_base_classes(instances[::-1])
info, full_name = _make_fake_typeinfo_and_full_name(base_classes, curr_module)
with self.msg.filter_errors() as local_errors:
self.check_multiple_inheritance(info)
info.is_intersection = True
except MroError:
errors.append((pretty_names_list, "would have inconsistent method resolution order"))
return None
if local_errors.has_new_errors():
errors.append((pretty_names_list, "would have incompatible method signatures"))
return None
curr_module.names[full_name] = SymbolTableNode(GDEF, info)
return Instance(info, [], extra_attrs=instances[0].extra_attrs or instances[1].extra_attrs)
def intersect_instance_callable(self, typ: Instance, callable_type: CallableType) -> Instance:
"""Creates a fake type that represents the intersection of an Instance and a CallableType.
It operates by creating a bare-minimum dummy TypeInfo that
subclasses type and adds a __call__ method matching callable_type.
"""
# In order for this to work in incremental mode, the type we generate needs to
# have a valid fullname and a corresponding entry in a symbol table. We generate
# a unique name inside the symbol table of the current module.
cur_module = self.scope.stack[0]
assert isinstance(cur_module, MypyFile)
gen_name = gen_unique_name(f"<callable subtype of {typ.type.name}>", cur_module.names)
# Synthesize a fake TypeInfo
short_name = format_type_bare(typ, self.options)
cdef, info = self.make_fake_typeinfo(cur_module.fullname, gen_name, short_name, [typ])
# Build up a fake FuncDef so we can populate the symbol table.
func_def = FuncDef("__call__", [], Block([]), callable_type)
func_def._fullname = cdef.fullname + ".__call__"
func_def.info = info
info.names["__call__"] = SymbolTableNode(MDEF, func_def)
cur_module.names[gen_name] = SymbolTableNode(GDEF, info)
return Instance(info, [], extra_attrs=typ.extra_attrs)
def make_fake_callable(self, typ: Instance) -> Instance:
"""Produce a new type that makes type Callable with a generic callable type."""
fallback = self.named_type("builtins.function")
callable_type = CallableType(
[AnyType(TypeOfAny.explicit), AnyType(TypeOfAny.explicit)],
[nodes.ARG_STAR, nodes.ARG_STAR2],
[None, None],
ret_type=AnyType(TypeOfAny.explicit),
fallback=fallback,
is_ellipsis_args=True,
)
return self.intersect_instance_callable(typ, callable_type)
def partition_by_callable(
self, typ: Type, unsound_partition: bool
) -> tuple[list[Type], list[Type]]:
"""Partitions a type into callable subtypes and uncallable subtypes.
Thus, given:
`callables, uncallables = partition_by_callable(type)`
If we assert `callable(type)` then `type` has type Union[*callables], and
If we assert `not callable(type)` then `type` has type Union[*uncallables]
If unsound_partition is set, assume that anything that is not
clearly callable is in fact not callable. Otherwise we generate a
new subtype that *is* callable.
Guaranteed to not return [], [].
"""
typ = get_proper_type(typ)
if isinstance(typ, (FunctionLike, TypeType)):
return [typ], []
if isinstance(typ, AnyType):
return [typ], [typ]
if isinstance(typ, NoneType):
return [], [typ]
if isinstance(typ, UnionType):
callables = []
uncallables = []
for subtype in typ.items:
# Use unsound_partition when handling unions in order to
# allow the expected type discrimination.
subcallables, subuncallables = self.partition_by_callable(
subtype, unsound_partition=True
)
callables.extend(subcallables)
uncallables.extend(subuncallables)
return callables, uncallables
if isinstance(typ, TypeVarType):
# We could do better probably?
            # Refine the type variable's bound as our type in the case that
# callable() is true. This unfortunately loses the information that
# the type is a type variable in that branch.
# This matches what is done for isinstance, but it may be possible to
# do better.
# If it is possible for the false branch to execute, return the original
# type to avoid losing type information.
callables, uncallables = self.partition_by_callable(
erase_to_union_or_bound(typ), unsound_partition
)
uncallables = [typ] if uncallables else []
return callables, uncallables
# A TupleType is callable if its fallback is, but needs special handling
# when we dummy up a new type.
ityp = typ
if isinstance(typ, TupleType):
ityp = tuple_fallback(typ)
if isinstance(ityp, Instance):
method = ityp.type.get_method("__call__")
if method and method.type:
callables, uncallables = self.partition_by_callable(
method.type, unsound_partition=False
)
if callables and not uncallables:
# Only consider the type callable if its __call__ method is
# definitely callable.
return [typ], []
if not unsound_partition:
fake = self.make_fake_callable(ityp)
if isinstance(typ, TupleType):
fake.type.tuple_type = TupleType(typ.items, fake)
return [fake.type.tuple_type], [typ]
return [fake], [typ]
if unsound_partition:
return [], [typ]
else:
            # We don't know how to properly make the type callable.
return [typ], [typ]
def conditional_callable_type_map(
self, expr: Expression, current_type: Type | None
) -> tuple[TypeMap, TypeMap]:
"""Takes in an expression and the current type of the expression.
Returns a 2-tuple: The first element is a map from the expression to
the restricted type if it were callable. The second element is a
map from the expression to the type it would hold if it weren't
callable.
"""
if not current_type:
return {}, {}
if isinstance(get_proper_type(current_type), AnyType):
return {}, {}
callables, uncallables = self.partition_by_callable(current_type, unsound_partition=False)
if callables and uncallables:
callable_map = {expr: UnionType.make_union(callables)} if callables else None
uncallable_map = {expr: UnionType.make_union(uncallables)} if uncallables else None
return callable_map, uncallable_map
elif callables:
return {}, None
return None, {}
def conditional_types_for_iterable(
self, item_type: Type, iterable_type: Type
) -> tuple[Type | None, Type | None]:
"""
Narrows the type of `iterable_type` based on the type of `item_type`.
        For now, we only support narrowing unions of TypedDicts based on the left operand being literal string(s).
"""
if_types: list[Type] = []
else_types: list[Type] = []
iterable_type = get_proper_type(iterable_type)
if isinstance(iterable_type, UnionType):
possible_iterable_types = get_proper_types(iterable_type.relevant_items())
else:
possible_iterable_types = [iterable_type]
item_str_literals = try_getting_str_literals_from_type(item_type)
for possible_iterable_type in possible_iterable_types:
if item_str_literals and isinstance(possible_iterable_type, TypedDictType):
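                # A required key is definitely present; an optional or potentially added
                # key may or may not be; a key that cannot exist only matches the else branch.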
for key in item_str_literals:
if key in possible_iterable_type.required_keys:
if_types.append(possible_iterable_type)
elif (
key in possible_iterable_type.items or not possible_iterable_type.is_final
):
if_types.append(possible_iterable_type)
else_types.append(possible_iterable_type)
else:
else_types.append(possible_iterable_type)
else:
if_types.append(possible_iterable_type)
else_types.append(possible_iterable_type)
return (
UnionType.make_union(if_types) if if_types else None,
UnionType.make_union(else_types) if else_types else None,
)
def _is_truthy_type(self, t: ProperType) -> bool:
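        # A type is treated as always truthy if it is function-like, an instance type
        # that defines neither __bool__ nor __len__ (excluding plain object), or a
        # union of such types.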
return (
(
isinstance(t, Instance)
and bool(t.type)
and not t.type.has_readable_member("__bool__")
and not t.type.has_readable_member("__len__")
and t.type.fullname != "builtins.object"
)
or isinstance(t, FunctionLike)
or (
isinstance(t, UnionType)
and all(self._is_truthy_type(t) for t in get_proper_types(t.items))
)
)
def check_for_truthy_type(self, t: Type, expr: Expression) -> None:
"""
Check if a type can have a truthy value.
Used in checks like::
if x: # <---
not x # <---
"""
if not state.strict_optional:
return # if everything can be None, all bets are off
t = get_proper_type(t)
if not self._is_truthy_type(t):
return
def format_expr_type() -> str:
typ = format_type(t, self.options)
if isinstance(expr, MemberExpr):
return f'Member "{expr.name}" has type {typ}'
elif isinstance(expr, RefExpr) and expr.fullname:
return f'"{expr.fullname}" has type {typ}'
elif isinstance(expr, CallExpr):
if isinstance(expr.callee, MemberExpr):
return f'"{expr.callee.name}" returns {typ}'
elif isinstance(expr.callee, RefExpr) and expr.callee.fullname:
return f'"{expr.callee.fullname}" returns {typ}'
return f"Call returns {typ}"
else:
return f"Expression has type {typ}"
def get_expr_name() -> str:
if isinstance(expr, (NameExpr, MemberExpr)):
return f'"{expr.name}"'
else:
# return type if expr has no name
return format_type(t, self.options)
if isinstance(t, FunctionLike):
self.fail(message_registry.FUNCTION_ALWAYS_TRUE.format(get_expr_name()), expr)
elif isinstance(t, UnionType):
self.fail(message_registry.TYPE_ALWAYS_TRUE_UNIONTYPE.format(format_expr_type()), expr)
elif isinstance(t, Instance) and t.type.fullname == "typing.Iterable":
_, info = self.make_fake_typeinfo("typing", "Collection", "Collection", [])
self.fail(
message_registry.ITERABLE_ALWAYS_TRUE.format(
format_expr_type(), format_type(Instance(info, t.args), self.options)
),
expr,
)
else:
self.fail(message_registry.TYPE_ALWAYS_TRUE.format(format_expr_type()), expr)
def find_type_equals_check(
self, node: ComparisonExpr, expr_indices: list[int]
) -> tuple[TypeMap, TypeMap]:
"""Narrow types based on any checks of the type ``type(x) == T``
Args:
node: The node that might contain the comparison
expr_indices: The list of indices of expressions in ``node`` that are being
compared
"""
def is_type_call(expr: CallExpr) -> bool:
"""Is expr a call to type with one argument?"""
return refers_to_fullname(expr.callee, "builtins.type") and len(expr.args) == 1
# exprs that are being passed into type
exprs_in_type_calls: list[Expression] = []
# type that is being compared to type(expr)
type_being_compared: list[TypeRange] | None = None
# whether the type being compared to is final
is_final = False
for index in expr_indices:
expr = node.operands[index]
if isinstance(expr, CallExpr) and is_type_call(expr):
exprs_in_type_calls.append(expr.args[0])
else:
current_type = self.get_isinstance_type(expr)
if current_type is None:
continue
if type_being_compared is not None:
# It doesn't really make sense to have several types being
# compared to the output of type (like type(x) == int == str)
# because whether that's true is solely dependent on what the
# types being compared are, so we don't try to narrow types any
# further because we can't really get any information about the
# type of x from that check
return {}, {}
else:
if isinstance(expr, RefExpr) and isinstance(expr.node, TypeInfo):
is_final = expr.node.is_final
type_being_compared = current_type
if not exprs_in_type_calls:
return {}, {}
if_maps: list[TypeMap] = []
else_maps: list[TypeMap] = []
for expr in exprs_in_type_calls:
current_if_type, current_else_type = self.conditional_types_with_intersection(
self.lookup_type(expr), type_being_compared, expr
)
current_if_map, current_else_map = conditional_types_to_typemaps(
expr, current_if_type, current_else_type
)
if_maps.append(current_if_map)
else_maps.append(current_else_map)
def combine_maps(list_maps: list[TypeMap]) -> TypeMap:
"""Combine all typemaps in list_maps into one typemap"""
result_map = {}
for d in list_maps:
if d is not None:
result_map.update(d)
return result_map
if_map = combine_maps(if_maps)
# type(x) == T is only true when x has the same type as T, meaning
# that it can be false if x is an instance of a subclass of T. That means
# we can't do any narrowing in the else case unless T is final, in which
# case T can't be subclassed
if is_final:
else_map = combine_maps(else_maps)
else:
else_map = {}
return if_map, else_map
def find_isinstance_check(
self, node: Expression, *, in_boolean_context: bool = True
) -> tuple[TypeMap, TypeMap]:
"""Find any isinstance checks (within a chain of ands). Includes
implicit and explicit checks for None and calls to callable.
Also includes TypeGuard and TypeIs functions.
Return value is a map of variables to their types if the condition
is true and a map of variables to their types if the condition is false.
If either of the values in the tuple is None, then that particular
branch can never occur.
        If `in_boolean_context=False` is passed, we are handling the rhs value of
        a walrus expression such as `(a := A())`. Such values are treated
        specially: for example, some errors (like the "always true" warnings)
        are suppressed for them.
May return {}, {}.
Can return None, None in situations involving NoReturn.
"""
if_map, else_map = self.find_isinstance_check_helper(
node, in_boolean_context=in_boolean_context
)
new_if_map = self.propagate_up_typemap_info(if_map)
new_else_map = self.propagate_up_typemap_info(else_map)
return new_if_map, new_else_map
def find_isinstance_check_helper(
self, node: Expression, *, in_boolean_context: bool = True
) -> tuple[TypeMap, TypeMap]:
if is_true_literal(node):
return {}, None
if is_false_literal(node):
return None, {}
if isinstance(node, CallExpr) and len(node.args) != 0:
expr = collapse_walrus(node.args[0])
if refers_to_fullname(node.callee, "builtins.isinstance"):
if len(node.args) != 2: # the error will be reported elsewhere
return {}, {}
if literal(expr) == LITERAL_TYPE:
return conditional_types_to_typemaps(
expr,
*self.conditional_types_with_intersection(
self.lookup_type(expr), self.get_isinstance_type(node.args[1]), expr
),
)
elif refers_to_fullname(node.callee, "builtins.issubclass"):
if len(node.args) != 2: # the error will be reported elsewhere
return {}, {}
if literal(expr) == LITERAL_TYPE:
return self.infer_issubclass_maps(node, expr)
elif refers_to_fullname(node.callee, "builtins.callable"):
if len(node.args) != 1: # the error will be reported elsewhere
return {}, {}
if literal(expr) == LITERAL_TYPE:
vartype = self.lookup_type(expr)
return self.conditional_callable_type_map(expr, vartype)
elif refers_to_fullname(node.callee, "builtins.hasattr"):
if len(node.args) != 2: # the error will be reported elsewhere
return {}, {}
attr = try_getting_str_literals(node.args[1], self.lookup_type(node.args[1]))
if literal(expr) == LITERAL_TYPE and attr and len(attr) == 1:
return self.hasattr_type_maps(expr, self.lookup_type(expr), attr[0])
elif isinstance(node.callee, RefExpr):
if node.callee.type_guard is not None or node.callee.type_is is not None:
# TODO: Follow *args, **kwargs
if node.arg_kinds[0] != nodes.ARG_POS:
# the first argument might be used as a kwarg
called_type = get_proper_type(self.lookup_type(node.callee))
# TODO: there are some more cases in check_call() to handle.
if isinstance(called_type, Instance):
call = find_member(
"__call__", called_type, called_type, is_operator=True
)
if call is not None:
called_type = get_proper_type(call)
                        # *assuming* the overloaded function is correct, there are a couple of cases:
# 1) The first argument has different names, but is pos-only. We don't
# care about this case, the argument must be passed positionally.
# 2) The first argument allows keyword reference, therefore must be the
# same between overloads.
if isinstance(called_type, (CallableType, Overloaded)):
name = called_type.items[0].arg_names[0]
if name in node.arg_names:
idx = node.arg_names.index(name)
# we want the idx-th variable to be narrowed
expr = collapse_walrus(node.args[idx])
else:
kind = (
"guard" if node.callee.type_guard is not None else "narrower"
)
self.fail(
message_registry.TYPE_GUARD_POS_ARG_REQUIRED.format(kind), node
)
return {}, {}
if literal(expr) == LITERAL_TYPE:
# Note: we wrap the target type, so that we can special case later.
# Namely, for isinstance() we use a normal meet, while TypeGuard is
# considered "always right" (i.e. even if the types are not overlapping).
                        # Also note that care must be taken to unwrap this back at the places
                        # where we use it to narrow down the declared type.
if node.callee.type_guard is not None:
return {expr: TypeGuardedType(node.callee.type_guard)}, {}
else:
assert node.callee.type_is is not None
return conditional_types_to_typemaps(
expr,
*self.conditional_types_with_intersection(
self.lookup_type(expr),
[TypeRange(node.callee.type_is, is_upper_bound=False)],
expr,
),
)
elif isinstance(node, ComparisonExpr):
# Step 1: Obtain the types of each operand and whether or not we can
# narrow their types. (For example, we shouldn't try narrowing the
# types of literal string or enum expressions).
operands = [collapse_walrus(x) for x in node.operands]
operand_types = []
narrowable_operand_index_to_hash = {}
for i, expr in enumerate(operands):
if not self.has_type(expr):
return {}, {}
expr_type = self.lookup_type(expr)
operand_types.append(expr_type)
if (
literal(expr) == LITERAL_TYPE
and not is_literal_none(expr)
and not self.is_literal_enum(expr)
):
h = literal_hash(expr)
if h is not None:
narrowable_operand_index_to_hash[i] = h
# Step 2: Group operands chained by either the 'is' or '==' operands
# together. For all other operands, we keep them in groups of size 2.
# So the expression:
#
# x0 == x1 == x2 < x3 < x4 is x5 is x6 is not x7 is not x8
#
# ...is converted into the simplified operator list:
#
# [("==", [0, 1, 2]), ("<", [2, 3]), ("<", [3, 4]),
# ("is", [4, 5, 6]), ("is not", [6, 7]), ("is not", [7, 8])]
#
# We group identity/equality expressions so we can propagate information
# we discover about one operand across the entire chain. We don't bother
# handling 'is not' and '!=' chains in a special way: those are very rare
# in practice.
simplified_operator_list = group_comparison_operands(
node.pairwise(), narrowable_operand_index_to_hash, {"==", "is"}
)
# Step 3: Analyze each group and infer more precise type maps for each
# assignable operand, if possible. We combine these type maps together
# in the final step.
partial_type_maps = []
for operator, expr_indices in simplified_operator_list:
if operator in {"is", "is not", "==", "!="}:
# is_valid_target:
# Controls which types we're allowed to narrow exprs to. Note that
# we cannot use 'is_literal_type_like' in both cases since doing
# 'x = 10000 + 1; x is 10001' is not always True in all Python
# implementations.
#
# coerce_only_in_literal_context:
# If true, coerce types into literal types only if one or more of
# the provided exprs contains an explicit Literal type. This could
                    # technically be set to any arbitrary value, but being liberal with
                    # narrowing when using 'is' and conservative when using '==' seems
# to break the least amount of real-world code.
#
# should_narrow_by_identity:
# Set to 'false' only if the user defines custom __eq__ or __ne__ methods
# that could cause identity-based narrowing to produce invalid results.
if operator in {"is", "is not"}:
is_valid_target: Callable[[Type], bool] = is_singleton_type
coerce_only_in_literal_context = False
should_narrow_by_identity = True
else:
def is_exactly_literal_type(t: Type) -> bool:
return isinstance(get_proper_type(t), LiteralType)
def has_no_custom_eq_checks(t: Type) -> bool:
return not custom_special_method(
t, "__eq__", check_all=False
) and not custom_special_method(t, "__ne__", check_all=False)
is_valid_target = is_exactly_literal_type
coerce_only_in_literal_context = True
expr_types = [operand_types[i] for i in expr_indices]
should_narrow_by_identity = all(
map(has_no_custom_eq_checks, expr_types)
) and not is_ambiguous_mix_of_enums(expr_types)
if_map: TypeMap = {}
else_map: TypeMap = {}
if should_narrow_by_identity:
if_map, else_map = self.refine_identity_comparison_expression(
operands,
operand_types,
expr_indices,
narrowable_operand_index_to_hash.keys(),
is_valid_target,
coerce_only_in_literal_context,
)
# Strictly speaking, we should also skip this check if the objects in the expr
# chain have custom __eq__ or __ne__ methods. But we (maybe optimistically)
                    # assume nobody would actually create a custom object that considers itself
# equal to None.
if if_map == {} and else_map == {}:
if_map, else_map = self.refine_away_none_in_comparison(
operands,
operand_types,
expr_indices,
narrowable_operand_index_to_hash.keys(),
)
                    # If we haven't been able to narrow types yet, we might be dealing with an
                    # explicit type(x) == some_type check
if if_map == {} and else_map == {}:
if_map, else_map = self.find_type_equals_check(node, expr_indices)
elif operator in {"in", "not in"}:
assert len(expr_indices) == 2
left_index, right_index = expr_indices
item_type = operand_types[left_index]
iterable_type = operand_types[right_index]
if_map, else_map = {}, {}
if left_index in narrowable_operand_index_to_hash:
                        # We only try to narrow away 'None' for now
if is_overlapping_none(item_type):
collection_item_type = get_proper_type(
builtin_item_type(iterable_type)
)
if (
collection_item_type is not None
and not is_overlapping_none(collection_item_type)
and not (
isinstance(collection_item_type, Instance)
and collection_item_type.type.fullname == "builtins.object"
)
and is_overlapping_erased_types(item_type, collection_item_type)
):
if_map[operands[left_index]] = remove_optional(item_type)
if right_index in narrowable_operand_index_to_hash:
if_type, else_type = self.conditional_types_for_iterable(
item_type, iterable_type
)
expr = operands[right_index]
if if_type is None:
if_map = None
else:
if_map[expr] = if_type
if else_type is None:
else_map = None
else:
else_map[expr] = else_type
else:
if_map = {}
else_map = {}
if operator in {"is not", "!=", "not in"}:
if_map, else_map = else_map, if_map
partial_type_maps.append((if_map, else_map))
# If we have found non-trivial restrictions from the regular comparisons,
# then return soon. Otherwise try to infer restrictions involving `len(x)`.
# TODO: support regular and len() narrowing in the same chain.
if any(m != ({}, {}) for m in partial_type_maps):
return reduce_conditional_maps(partial_type_maps)
else:
# Use meet for `and` maps to get correct results for chained checks
# like `if 1 < len(x) < 4: ...`
return reduce_conditional_maps(self.find_tuple_len_narrowing(node), use_meet=True)
elif isinstance(node, AssignmentExpr):
if_map = {}
else_map = {}
if_assignment_map, else_assignment_map = self.find_isinstance_check(node.target)
if if_assignment_map is not None:
if_map.update(if_assignment_map)
if else_assignment_map is not None:
else_map.update(else_assignment_map)
if_condition_map, else_condition_map = self.find_isinstance_check(
node.value, in_boolean_context=False
)
if if_condition_map is not None:
if_map.update(if_condition_map)
if else_condition_map is not None:
else_map.update(else_condition_map)
return (
(None if if_assignment_map is None or if_condition_map is None else if_map),
(None if else_assignment_map is None or else_condition_map is None else else_map),
)
elif isinstance(node, OpExpr) and node.op == "and":
left_if_vars, left_else_vars = self.find_isinstance_check(node.left)
right_if_vars, right_else_vars = self.find_isinstance_check(node.right)
# (e1 and e2) is true if both e1 and e2 are true,
# and false if at least one of e1 and e2 is false.
return (
and_conditional_maps(left_if_vars, right_if_vars),
# Note that if left else type is Any, we can't add any additional
# types to it, since the right maps were computed assuming
# the left is True, which may be not the case in the else branch.
or_conditional_maps(left_else_vars, right_else_vars, coalesce_any=True),
)
elif isinstance(node, OpExpr) and node.op == "or":
left_if_vars, left_else_vars = self.find_isinstance_check(node.left)
right_if_vars, right_else_vars = self.find_isinstance_check(node.right)
# (e1 or e2) is true if at least one of e1 or e2 is true,
# and false if both e1 and e2 are false.
return (
or_conditional_maps(left_if_vars, right_if_vars),
and_conditional_maps(left_else_vars, right_else_vars),
)
elif isinstance(node, UnaryExpr) and node.op == "not":
left, right = self.find_isinstance_check(node.expr)
return right, left
elif (
literal(node) == LITERAL_TYPE
and self.has_type(node)
and self.can_be_narrowed_with_len(self.lookup_type(node))
# Only translate `if x` to `if len(x) > 0` when possible.
and not custom_special_method(self.lookup_type(node), "__bool__")
and self.options.strict_optional
):
# Combine a `len(x) > 0` check with the default logic below.
yes_type, no_type = self.narrow_with_len(self.lookup_type(node), ">", 0)
if yes_type is not None:
yes_type = true_only(yes_type)
else:
yes_type = UninhabitedType()
if no_type is not None:
no_type = false_only(no_type)
else:
no_type = UninhabitedType()
if_map = {node: yes_type} if not isinstance(yes_type, UninhabitedType) else None
else_map = {node: no_type} if not isinstance(no_type, UninhabitedType) else None
return if_map, else_map
# Restrict the type of the variable to True-ish/False-ish in the if and else branches
# respectively
original_vartype = self.lookup_type(node)
if in_boolean_context:
# We don't check `:=` values in expressions like `(a := A())`,
# because they produce two error messages.
self.check_for_truthy_type(original_vartype, node)
vartype = try_expanding_sum_type_to_union(original_vartype, "builtins.bool")
if_type = true_only(vartype)
else_type = false_only(vartype)
if_map = {node: if_type} if not isinstance(if_type, UninhabitedType) else None
else_map = {node: else_type} if not isinstance(else_type, UninhabitedType) else None
return if_map, else_map
def propagate_up_typemap_info(self, new_types: TypeMap) -> TypeMap:
"""Attempts refining parent expressions of any MemberExpr or IndexExprs in new_types.
        Specifically, this function accepts a mapping (new_types) from expressions to
        the types they have just been narrowed to.
This function iterates through new_types and attempts to use the information to try
refining any parent types that happen to be unions.
For example, suppose there are two types "A = Tuple[int, int]" and "B = Tuple[str, str]".
Next, suppose that 'new_types' specifies the expression 'foo[0]' has a refined type
of 'int' and that 'foo' was previously deduced to be of type Union[A, B].
Then, this function will observe that since A[0] is an int and B[0] is not, the type of
        'foo' can be further refined from Union[A, B] into just A.
We perform this kind of "parent narrowing" for member lookup expressions and indexing
expressions into tuples, namedtuples, and typeddicts. We repeat this narrowing
recursively if the parent is also a "lookup expression". So for example, if we have
the expression "foo['bar'].baz[0]", we'd potentially end up refining types for the
expressions "foo", "foo['bar']", and "foo['bar'].baz".
We return the newly refined map. This map is guaranteed to be a superset of 'new_types'.
"""
if new_types is None:
return None
output_map = {}
for expr, expr_type in new_types.items():
# The original inferred type should always be present in the output map, of course
output_map[expr] = expr_type
# Next, try using this information to refine the parent types, if applicable.
new_mapping = self.refine_parent_types(expr, expr_type)
for parent_expr, proposed_parent_type in new_mapping.items():
# We don't try inferring anything if we've already inferred something for
# the parent expression.
# TODO: Consider picking the narrower type instead of always discarding this?
if parent_expr in new_types:
continue
output_map[parent_expr] = proposed_parent_type
return output_map
def refine_parent_types(self, expr: Expression, expr_type: Type) -> Mapping[Expression, Type]:
"""Checks if the given expr is a 'lookup operation' into a union and iteratively refines
the parent types based on the 'expr_type'.
For example, if 'expr' is an expression like 'a.b.c.d', we'll potentially return refined
types for expressions 'a', 'a.b', and 'a.b.c'.
For more details about what a 'lookup operation' is and how we use the expr_type to refine
the parent types of lookup_expr, see the docstring in 'propagate_up_typemap_info'.
"""
output: dict[Expression, Type] = {}
# Note: parent_expr and parent_type are progressively refined as we crawl up the
# parent lookup chain.
while True:
# First, check if this expression is one that's attempting to
# "lookup" some key in the parent type. If so, save the parent type
# and create function that will try replaying the same lookup
# operation against arbitrary types.
if isinstance(expr, MemberExpr):
parent_expr = collapse_walrus(expr.expr)
parent_type = self.lookup_type_or_none(parent_expr)
member_name = expr.name
def replay_lookup(new_parent_type: ProperType) -> Type | None:
with self.msg.filter_errors() as w:
member_type = analyze_member_access(
name=member_name,
typ=new_parent_type,
context=parent_expr,
is_lvalue=False,
is_super=False,
is_operator=False,
msg=self.msg,
original_type=new_parent_type,
chk=self,
in_literal_context=False,
)
if w.has_new_errors():
return None
else:
return member_type
elif isinstance(expr, IndexExpr):
parent_expr = collapse_walrus(expr.base)
parent_type = self.lookup_type_or_none(parent_expr)
index_type = self.lookup_type_or_none(expr.index)
if index_type is None:
return output
str_literals = try_getting_str_literals_from_type(index_type)
if str_literals is not None:
# Refactoring these two indexing replay functions is surprisingly
# tricky -- see https://github.com/python/mypy/pull/7917, which
# was blocked by https://github.com/mypyc/mypyc/issues/586
def replay_lookup(new_parent_type: ProperType) -> Type | None:
if not isinstance(new_parent_type, TypedDictType):
return None
try:
assert str_literals is not None
member_types = [new_parent_type.items[key] for key in str_literals]
except KeyError:
return None
return make_simplified_union(member_types)
else:
int_literals = try_getting_int_literals_from_type(index_type)
if int_literals is not None:
def replay_lookup(new_parent_type: ProperType) -> Type | None:
if not isinstance(new_parent_type, TupleType):
return None
try:
assert int_literals is not None
member_types = [new_parent_type.items[key] for key in int_literals]
except IndexError:
return None
return make_simplified_union(member_types)
else:
return output
else:
return output
# If we somehow didn't previously derive the parent type, abort completely
# with what we have so far: something went wrong at an earlier stage.
if parent_type is None:
return output
# We currently only try refining the parent type if it's a Union.
# If not, there's no point in trying to refine any further parents
# since we have no further information we can use to refine the lookup
# chain, so we end early as an optimization.
parent_type = get_proper_type(parent_type)
if not isinstance(parent_type, UnionType):
return output
# Take each element in the parent union and replay the original lookup procedure
# to figure out which parents are compatible.
new_parent_types = []
for item in flatten_nested_unions(parent_type.items):
member_type = replay_lookup(get_proper_type(item))
if member_type is None:
# We were unable to obtain the member type. So, we give up on refining this
# parent type entirely and abort.
return output
if is_overlapping_types(member_type, expr_type):
new_parent_types.append(item)
# If none of the parent types overlap (if we derived an empty union), something
# went wrong. We should never hit this case, but deriving the uninhabited type or
# reporting an error both seem unhelpful. So we abort.
if not new_parent_types:
return output
expr = parent_expr
expr_type = output[parent_expr] = make_simplified_union(new_parent_types)
def refine_identity_comparison_expression(
self,
operands: list[Expression],
operand_types: list[Type],
chain_indices: list[int],
narrowable_operand_indices: AbstractSet[int],
is_valid_target: Callable[[ProperType], bool],
coerce_only_in_literal_context: bool,
) -> tuple[TypeMap, TypeMap]:
"""Produce conditional type maps refining expressions by an identity/equality comparison.
The 'operands' and 'operand_types' lists should be the full list of operands used
in the overall comparison expression. The 'chain_indices' list is the list of indices
actually used within this identity comparison chain.
So if we have the expression:
a <= b is c is d <= e
...then 'operands' and 'operand_types' would be lists of length 5 and 'chain_indices'
would be the list [1, 2, 3].
The 'narrowable_operand_indices' parameter is the set of all indices we are allowed
to refine the types of: that is, all operands that will potentially be a part of
the output TypeMaps.
Although this function could theoretically try setting the types of the operands
in the chains to the meet, doing that causes too many issues in real-world code.
Instead, we use 'is_valid_target' to identify which of the given chain types
we could plausibly use as the refined type for the expressions in the chain.
Similarly, 'coerce_only_in_literal_context' controls whether we should try coercing
expressions in the chain to a Literal type. Performing this coercion is sometimes
        too aggressive a narrowing, depending on context.
"""
should_coerce = True
if coerce_only_in_literal_context:
def should_coerce_inner(typ: Type) -> bool:
typ = get_proper_type(typ)
return is_literal_type_like(typ) or (
isinstance(typ, Instance) and typ.type.is_enum
)
should_coerce = any(should_coerce_inner(operand_types[i]) for i in chain_indices)
target: Type | None = None
possible_target_indices = []
for i in chain_indices:
expr_type = operand_types[i]
if should_coerce:
expr_type = coerce_to_literal(expr_type)
if not is_valid_target(get_proper_type(expr_type)):
continue
if target and not is_same_type(target, expr_type):
# We have multiple disjoint target types. So the 'if' branch
# must be unreachable.
return None, {}
target = expr_type
possible_target_indices.append(i)
# There's nothing we can currently infer if none of the operands are valid targets,
# so we end early and infer nothing.
if target is None:
return {}, {}
# If possible, use an unassignable expression as the target.
# We skip refining the type of the target below, so ideally we'd
        # want to pick an expression we were going to skip anyway.
singleton_index = -1
for i in possible_target_indices:
if i not in narrowable_operand_indices:
singleton_index = i
# But if none of the possible singletons are unassignable ones, we give up
# and arbitrarily pick the last item, mostly because other parts of the
# type narrowing logic bias towards picking the rightmost item and it'd be
# nice to stay consistent.
#
# That said, it shouldn't matter which index we pick. For example, suppose we
# have this if statement, where 'x' and 'y' both have singleton types:
#
# if x is y:
# reveal_type(x)
# reveal_type(y)
# else:
# reveal_type(x)
# reveal_type(y)
#
# At this point, 'x' and 'y' *must* have the same singleton type: we would have
# ended early in the first for-loop in this function if they weren't.
#
# So, we should always get the same result in the 'if' case no matter which
# index we pick. And while we do end up getting different results in the 'else'
# case depending on the index (e.g. if we pick 'y', then its type stays the same
# while 'x' is narrowed to '<uninhabited>'), this distinction is also moot: mypy
# currently will just mark the whole branch as unreachable if either operand is
# narrowed to <uninhabited>.
if singleton_index == -1:
singleton_index = possible_target_indices[-1]
sum_type_name = None
target = get_proper_type(target)
if isinstance(target, LiteralType) and (
target.is_enum_literal() or isinstance(target.value, bool)
):
sum_type_name = target.fallback.type.fullname
target_type = [TypeRange(target, is_upper_bound=False)]
partial_type_maps = []
for i in chain_indices:
# If we try refining a type against itself, conditional_type_map
# will end up assuming that the 'else' branch is unreachable. This is
# typically not what we want: generally the user will intend for the
# target type to be some fixed 'sentinel' value and will want to refine
# the other exprs against this one instead.
if i == singleton_index:
continue
# Naturally, we can't refine operands which are not permitted to be refined.
if i not in narrowable_operand_indices:
continue
expr = operands[i]
expr_type = coerce_to_literal(operand_types[i])
if sum_type_name is not None:
expr_type = try_expanding_sum_type_to_union(expr_type, sum_type_name)
# We intentionally use 'conditional_types' directly here instead of
# 'self.conditional_types_with_intersection': we only compute ad-hoc
# intersections when working with pure instances.
types = conditional_types(expr_type, target_type)
partial_type_maps.append(conditional_types_to_typemaps(expr, *types))
return reduce_conditional_maps(partial_type_maps)
def refine_away_none_in_comparison(
self,
operands: list[Expression],
operand_types: list[Type],
chain_indices: list[int],
narrowable_operand_indices: AbstractSet[int],
) -> tuple[TypeMap, TypeMap]:
"""Produces conditional type maps refining away None in an identity/equality chain.
For more details about what the different arguments mean, see the
docstring of 'refine_identity_comparison_expression' up above.
"""
non_optional_types = []
for i in chain_indices:
typ = operand_types[i]
if not is_overlapping_none(typ):
non_optional_types.append(typ)
# Make sure we have a mixture of optional and non-optional types.
if len(non_optional_types) == 0 or len(non_optional_types) == len(chain_indices):
return {}, {}
if_map = {}
for i in narrowable_operand_indices:
expr_type = operand_types[i]
if not is_overlapping_none(expr_type):
continue
if any(is_overlapping_erased_types(expr_type, t) for t in non_optional_types):
if_map[operands[i]] = remove_optional(expr_type)
return if_map, {}
def is_len_of_tuple(self, expr: Expression) -> bool:
"""Is this expression a `len(x)` call where x is a tuple or union of tuples?"""
if not isinstance(expr, CallExpr):
return False
if not refers_to_fullname(expr.callee, "builtins.len"):
return False
if len(expr.args) != 1:
return False
expr = expr.args[0]
if literal(expr) != LITERAL_TYPE:
return False
if not self.has_type(expr):
return False
return self.can_be_narrowed_with_len(self.lookup_type(expr))
def can_be_narrowed_with_len(self, typ: Type) -> bool:
"""Is this a type that can benefit from length check type restrictions?
Currently supported types are TupleTypes, Instances of builtins.tuple, and
unions involving such types.
"""
if custom_special_method(typ, "__len__"):
# If user overrides builtin behavior, we can't do anything.
return False
p_typ = get_proper_type(typ)
# Note: we are conservative about tuple subclasses, because some code may rely on
# the fact that tuple_type of fallback TypeInfo matches the original TupleType.
if isinstance(p_typ, TupleType):
if any(isinstance(t, UnpackType) for t in p_typ.items):
return p_typ.partial_fallback.type.fullname == "builtins.tuple"
return True
if isinstance(p_typ, Instance):
return p_typ.type.has_base("builtins.tuple")
if isinstance(p_typ, UnionType):
return any(self.can_be_narrowed_with_len(t) for t in p_typ.items)
return False
def literal_int_expr(self, expr: Expression) -> int | None:
"""Is this expression an int literal, or a reference to an int constant?
If yes, return the corresponding int value, otherwise return None.
"""
if not self.has_type(expr):
return None
expr_type = self.lookup_type(expr)
expr_type = coerce_to_literal(expr_type)
proper_type = get_proper_type(expr_type)
if not isinstance(proper_type, LiteralType):
return None
if not isinstance(proper_type.value, int):
return None
return proper_type.value
def find_tuple_len_narrowing(self, node: ComparisonExpr) -> list[tuple[TypeMap, TypeMap]]:
"""Top-level logic to find type restrictions from a length check on tuples.
We try to detect `if` checks like the following:
x: tuple[int, int] | tuple[int, int, int]
y: tuple[int, int] | tuple[int, int, int]
if len(x) == len(y) == 2:
a, b = x # OK
c, d = y # OK
z: tuple[int, ...]
if 1 < len(z) < 4:
x = z # OK
and report corresponding type restrictions to the binder.
"""
# First step: group consecutive `is` and `==` comparisons together.
# This is essentially a simplified version of group_comparison_operands(),
# tuned to the len()-like checks. Note that we don't propagate indirect
# restrictions like e.g. `len(x) > foo() > 1` yet, since it is tricky.
# TODO: propagate indirect len() comparison restrictions.
chained = []
last_group = set()
for op, left, right in node.pairwise():
if isinstance(left, AssignmentExpr):
left = left.value
if isinstance(right, AssignmentExpr):
right = right.value
if op in ("is", "=="):
last_group.add(left)
last_group.add(right)
else:
if last_group:
chained.append(("==", list(last_group)))
last_group = set()
if op in {"is not", "!=", "<", "<=", ">", ">="}:
chained.append((op, [left, right]))
if last_group:
chained.append(("==", list(last_group)))
# Second step: infer type restrictions from each group found above.
type_maps = []
for op, items in chained:
# TODO: support unions of literal types as len() comparison targets.
if not any(self.literal_int_expr(it) is not None for it in items):
continue
if not any(self.is_len_of_tuple(it) for it in items):
continue
# At this step we know there is at least one len(x) and one literal in the group.
if op in ("is", "=="):
literal_values = set()
tuples = []
for it in items:
lit = self.literal_int_expr(it)
if lit is not None:
literal_values.add(lit)
continue
if self.is_len_of_tuple(it):
assert isinstance(it, CallExpr)
tuples.append(it.args[0])
if len(literal_values) > 1:
# More than one different literal value found, like 1 == len(x) == 2,
# so the corresponding branch is unreachable.
return [(None, {})]
size = literal_values.pop()
if size > MAX_PRECISE_TUPLE_SIZE:
# Avoid creating huge tuples from checks like if len(x) == 300.
continue
for tpl in tuples:
yes_type, no_type = self.narrow_with_len(self.lookup_type(tpl), op, size)
yes_map = None if yes_type is None else {tpl: yes_type}
no_map = None if no_type is None else {tpl: no_type}
type_maps.append((yes_map, no_map))
else:
left, right = items
if self.is_len_of_tuple(right):
# Normalize `1 < len(x)` and similar as `len(x) > 1`.
left, right = right, left
op = flip_ops.get(op, op)
r_size = self.literal_int_expr(right)
assert r_size is not None
if r_size > MAX_PRECISE_TUPLE_SIZE:
# Avoid creating huge unions from checks like if len(x) > 300.
continue
assert isinstance(left, CallExpr)
yes_type, no_type = self.narrow_with_len(
self.lookup_type(left.args[0]), op, r_size
)
yes_map = None if yes_type is None else {left.args[0]: yes_type}
no_map = None if no_type is None else {left.args[0]: no_type}
type_maps.append((yes_map, no_map))
return type_maps
def narrow_with_len(self, typ: Type, op: str, size: int) -> tuple[Type | None, Type | None]:
"""Dispatch tuple type narrowing logic depending on the kind of type we got."""
typ = get_proper_type(typ)
if isinstance(typ, TupleType):
return self.refine_tuple_type_with_len(typ, op, size)
elif isinstance(typ, Instance):
return self.refine_instance_type_with_len(typ, op, size)
elif isinstance(typ, UnionType):
yes_types = []
no_types = []
other_types = []
for t in typ.items:
if not self.can_be_narrowed_with_len(t):
other_types.append(t)
continue
yt, nt = self.narrow_with_len(t, op, size)
if yt is not None:
yes_types.append(yt)
if nt is not None:
no_types.append(nt)
yes_types += other_types
no_types += other_types
if yes_types:
yes_type = make_simplified_union(yes_types)
else:
yes_type = None
if no_types:
no_type = make_simplified_union(no_types)
else:
no_type = None
return yes_type, no_type
else:
assert False, "Unsupported type for len narrowing"
def refine_tuple_type_with_len(
self, typ: TupleType, op: str, size: int
) -> tuple[Type | None, Type | None]:
"""Narrow a TupleType using length restrictions."""
unpack_index = find_unpack_in_list(typ.items)
if unpack_index is None:
# For fixed length tuple situation is trivial, it is either reachable or not,
# depending on the current length, expected length, and the comparison op.
method = int_op_to_method[op]
if method(typ.length(), size):
return typ, None
return None, typ
unpack = typ.items[unpack_index]
assert isinstance(unpack, UnpackType)
unpacked = get_proper_type(unpack.type)
if isinstance(unpacked, TypeVarTupleType):
# For tuples involving TypeVarTuple unpack we can't do much except
# inferring reachability, and recording the restrictions on TypeVarTuple
# for further "manual" use elsewhere.
min_len = typ.length() - 1 + unpacked.min_len
if op in ("==", "is"):
if min_len <= size:
return typ, typ
return None, typ
elif op in ("<", "<="):
if op == "<=":
size += 1
if min_len < size:
prefix = typ.items[:unpack_index]
suffix = typ.items[unpack_index + 1 :]
# TODO: also record max_len to avoid false negatives?
unpack = UnpackType(unpacked.copy_modified(min_len=size - typ.length() + 1))
return typ, typ.copy_modified(items=prefix + [unpack] + suffix)
return None, typ
else:
yes_type, no_type = self.refine_tuple_type_with_len(typ, neg_ops[op], size)
return no_type, yes_type
# Homogeneous variadic item is the case where we are most flexible. Essentially,
# we adjust the variadic item by "eating away" from it to satisfy the restriction.
assert isinstance(unpacked, Instance) and unpacked.type.fullname == "builtins.tuple"
min_len = typ.length() - 1
arg = unpacked.args[0]
prefix = typ.items[:unpack_index]
suffix = typ.items[unpack_index + 1 :]
if op in ("==", "is"):
if min_len <= size:
# TODO: return fixed union + prefixed variadic tuple for no_type?
return typ.copy_modified(items=prefix + [arg] * (size - min_len) + suffix), typ
return None, typ
elif op in ("<", "<="):
if op == "<=":
size += 1
if min_len < size:
# Note: there is some ambiguity w.r.t. to where to put the additional
# items: before or after the unpack. However, such types are equivalent,
# so we always put them before for consistency.
no_type = typ.copy_modified(
items=prefix + [arg] * (size - min_len) + [unpack] + suffix
)
yes_items = []
for n in range(size - min_len):
yes_items.append(typ.copy_modified(items=prefix + [arg] * n + suffix))
return UnionType.make_union(yes_items, typ.line, typ.column), no_type
return None, typ
else:
yes_type, no_type = self.refine_tuple_type_with_len(typ, neg_ops[op], size)
return no_type, yes_type
def refine_instance_type_with_len(
self, typ: Instance, op: str, size: int
) -> tuple[Type | None, Type | None]:
"""Narrow a homogeneous tuple using length restrictions."""
base = map_instance_to_supertype(typ, self.lookup_typeinfo("builtins.tuple"))
arg = base.args[0]
# Again, we are conservative about subclasses until we gain more confidence.
allow_precise = (
PRECISE_TUPLE_TYPES in self.options.enable_incomplete_feature
) and typ.type.fullname == "builtins.tuple"
if op in ("==", "is"):
# TODO: return fixed union + prefixed variadic tuple for no_type?
return TupleType(items=[arg] * size, fallback=typ), typ
elif op in ("<", "<="):
if op == "<=":
size += 1
if allow_precise:
unpack = UnpackType(self.named_generic_type("builtins.tuple", [arg]))
no_type: Type | None = TupleType(items=[arg] * size + [unpack], fallback=typ)
else:
no_type = typ
if allow_precise:
items = []
for n in range(size):
items.append(TupleType([arg] * n, fallback=typ))
yes_type: Type | None = UnionType.make_union(items, typ.line, typ.column)
else:
yes_type = typ
return yes_type, no_type
else:
yes_type, no_type = self.refine_instance_type_with_len(typ, neg_ops[op], size)
return no_type, yes_type
#
# Helpers
#
@overload
def check_subtype(
self,
subtype: Type,
supertype: Type,
context: Context,
msg: str,
subtype_label: str | None = None,
supertype_label: str | None = None,
*,
notes: list[str] | None = None,
code: ErrorCode | None = None,
outer_context: Context | None = None,
) -> bool: ...
@overload
def check_subtype(
self,
subtype: Type,
supertype: Type,
context: Context,
msg: ErrorMessage,
subtype_label: str | None = None,
supertype_label: str | None = None,
*,
notes: list[str] | None = None,
outer_context: Context | None = None,
) -> bool: ...
def check_subtype(
self,
subtype: Type,
supertype: Type,
context: Context,
msg: str | ErrorMessage,
subtype_label: str | None = None,
supertype_label: str | None = None,
*,
notes: list[str] | None = None,
code: ErrorCode | None = None,
outer_context: Context | None = None,
) -> bool:
"""Generate an error if the subtype is not compatible with supertype."""
if is_subtype(subtype, supertype, options=self.options):
return True
if isinstance(msg, str):
msg = ErrorMessage(msg, code=code)
if self.msg.prefer_simple_messages():
self.fail(msg, context) # Fast path -- skip all fancy logic
return False
orig_subtype = subtype
subtype = get_proper_type(subtype)
orig_supertype = supertype
supertype = get_proper_type(supertype)
if self.msg.try_report_long_tuple_assignment_error(
subtype, supertype, context, msg, subtype_label, supertype_label
):
return False
extra_info: list[str] = []
note_msg = ""
notes = notes or []
if subtype_label is not None or supertype_label is not None:
subtype_str, supertype_str = format_type_distinctly(
orig_subtype, orig_supertype, options=self.options
)
if subtype_label is not None:
extra_info.append(subtype_label + " " + subtype_str)
if supertype_label is not None:
extra_info.append(supertype_label + " " + supertype_str)
note_msg = make_inferred_type_note(
outer_context or context, subtype, supertype, supertype_str
)
if isinstance(subtype, Instance) and isinstance(supertype, Instance):
notes = append_invariance_notes(notes, subtype, supertype)
if isinstance(subtype, UnionType) and isinstance(supertype, UnionType):
notes = append_union_note(notes, subtype, supertype, self.options)
if extra_info:
msg = msg.with_additional_msg(" (" + ", ".join(extra_info) + ")")
self.fail(msg, context)
for note in notes:
self.msg.note(note, context, code=msg.code)
if note_msg:
self.note(note_msg, context, code=msg.code)
self.msg.maybe_note_concatenate_pos_args(subtype, supertype, context, code=msg.code)
if (
isinstance(supertype, Instance)
and supertype.type.is_protocol
and isinstance(subtype, (CallableType, Instance, TupleType, TypedDictType))
):
self.msg.report_protocol_problems(subtype, supertype, context, code=msg.code)
if isinstance(supertype, CallableType) and isinstance(subtype, Instance):
call = find_member("__call__", subtype, subtype, is_operator=True)
if call:
self.msg.note_call(subtype, call, context, code=msg.code)
if isinstance(subtype, (CallableType, Overloaded)) and isinstance(supertype, Instance):
if supertype.type.is_protocol and "__call__" in supertype.type.protocol_members:
call = find_member("__call__", supertype, subtype, is_operator=True)
assert call is not None
if not is_subtype(subtype, call, options=self.options):
self.msg.note_call(supertype, call, context, code=msg.code)
self.check_possible_missing_await(subtype, supertype, context, code=msg.code)
return False
def get_precise_awaitable_type(self, typ: Type, local_errors: ErrorWatcher) -> Type | None:
"""If type implements Awaitable[X] with non-Any X, return X.
In all other cases return None. This method must be called in context
of local_errors.
"""
if isinstance(get_proper_type(typ), PartialType):
# Partial types are special, ignore them here.
return None
try:
aw_type = self.expr_checker.check_awaitable_expr(
typ, Context(), "", ignore_binder=True
)
except KeyError:
# This is a hack to speed up tests by not including Awaitable in all typing stubs.
return None
if local_errors.has_new_errors():
return None
if isinstance(get_proper_type(aw_type), (AnyType, UnboundType)):
return None
return aw_type
@contextmanager
def checking_await_set(self) -> Iterator[None]:
self.checking_missing_await = True
try:
yield
finally:
self.checking_missing_await = False
def check_possible_missing_await(
self, subtype: Type, supertype: Type, context: Context, code: ErrorCode | None
) -> None:
"""Check if the given type becomes a subtype when awaited."""
if self.checking_missing_await:
# Avoid infinite recursion.
return
with self.checking_await_set(), self.msg.filter_errors() as local_errors:
aw_type = self.get_precise_awaitable_type(subtype, local_errors)
if aw_type is None:
return
if not self.check_subtype(
aw_type, supertype, context, msg=message_registry.INCOMPATIBLE_TYPES
):
return
self.msg.possible_missing_await(context, code)
def named_type(self, name: str) -> Instance:
"""Return an instance type with given name and implicit Any type args.
For example, named_type('builtins.object') produces the 'object' type.
"""
# Assume that the name refers to a type.
sym = self.lookup_qualified(name)
node = sym.node
if isinstance(node, TypeAlias):
assert isinstance(node.target, Instance) # type: ignore[misc]
node = node.target.type
assert isinstance(node, TypeInfo)
any_type = AnyType(TypeOfAny.from_omitted_generics)
return Instance(node, [any_type] * len(node.defn.type_vars))
def named_generic_type(self, name: str, args: list[Type]) -> Instance:
"""Return an instance with the given name and type arguments.
Assume that the number of arguments is correct. Assume that
the name refers to a compatible generic type.
"""
info = self.lookup_typeinfo(name)
args = [remove_instance_last_known_values(arg) for arg in args]
# TODO: assert len(args) == len(info.defn.type_vars)
return Instance(info, args)
def lookup_typeinfo(self, fullname: str) -> TypeInfo:
# Assume that the name refers to a class.
sym = self.lookup_qualified(fullname)
node = sym.node
assert isinstance(node, TypeInfo)
return node
def type_type(self) -> Instance:
"""Return instance type 'type'."""
return self.named_type("builtins.type")
def str_type(self) -> Instance:
"""Return instance type 'str'."""
return self.named_type("builtins.str")
def store_type(self, node: Expression, typ: Type) -> None:
"""Store the type of a node in the type map."""
self._type_maps[-1][node] = typ
def has_type(self, node: Expression) -> bool:
return any(node in m for m in reversed(self._type_maps))
def lookup_type_or_none(self, node: Expression) -> Type | None:
for m in reversed(self._type_maps):
if node in m:
return m[node]
return None
def lookup_type(self, node: Expression) -> Type:
for m in reversed(self._type_maps):
t = m.get(node)
if t is not None:
return t
raise KeyError(node)
def store_types(self, d: dict[Expression, Type]) -> None:
self._type_maps[-1].update(d)
@contextmanager
def local_type_map(self) -> Iterator[dict[Expression, Type]]:
"""Store inferred types into a temporary type map (returned).
This can be used to perform type checking "experiments" without
affecting exported types (which are used by mypyc).
"""
temp_type_map: dict[Expression, Type] = {}
self._type_maps.append(temp_type_map)
yield temp_type_map
self._type_maps.pop()
def in_checked_function(self) -> bool:
"""Should we type-check the current function?
- Yes if --check-untyped-defs is set.
- Yes outside functions.
- Yes in annotated functions.
- No otherwise.
"""
return (
self.options.check_untyped_defs or not self.dynamic_funcs or not self.dynamic_funcs[-1]
)
def lookup(self, name: str) -> SymbolTableNode:
"""Look up a definition from the symbol table with the given name."""
if name in self.globals:
return self.globals[name]
else:
b = self.globals.get("__builtins__", None)
if b:
assert isinstance(b.node, MypyFile)
table = b.node.names
if name in table:
return table[name]
raise KeyError(f"Failed lookup: {name}")
def lookup_qualified(self, name: str) -> SymbolTableNode:
if "." not in name:
return self.lookup(name)
else:
parts = name.split(".")
n = self.modules[parts[0]]
for i in range(1, len(parts) - 1):
sym = n.names.get(parts[i])
assert sym is not None, "Internal error: attempted lookup of unknown name"
assert isinstance(sym.node, MypyFile)
n = sym.node
last = parts[-1]
if last in n.names:
return n.names[last]
elif len(parts) == 2 and parts[0] in ("builtins", "typing"):
fullname = ".".join(parts)
if fullname in SUGGESTED_TEST_FIXTURES:
suggestion = ", e.g. add '[{} fixtures/{}]' to your test".format(
parts[0], SUGGESTED_TEST_FIXTURES[fullname]
)
else:
suggestion = ""
raise KeyError(
"Could not find builtin symbol '{}' (If you are running a "
"test case, use a fixture that "
"defines this symbol{})".format(last, suggestion)
)
else:
msg = "Failed qualified lookup: '{}' (fullname = '{}')."
raise KeyError(msg.format(last, name))
@contextmanager
def enter_partial_types(
self, *, is_function: bool = False, is_class: bool = False
) -> Iterator[None]:
"""Enter a new scope for collecting partial types.
Also report errors for (some) variables which still have partial
types, i.e. we couldn't infer a complete type.
"""
is_local = (self.partial_types and self.partial_types[-1].is_local) or is_function
self.partial_types.append(PartialTypeScope({}, is_function, is_local))
yield
# Don't complain about not being able to infer partials if it is
# at the toplevel (with allow_untyped_globals) or if it is in an
# untyped function being checked with check_untyped_defs.
permissive = (self.options.allow_untyped_globals and not is_local) or (
self.options.check_untyped_defs and self.dynamic_funcs and self.dynamic_funcs[-1]
)
partial_types, _, _ = self.partial_types.pop()
if not self.current_node_deferred:
for var, context in partial_types.items():
# If we require local partial types, there are a few exceptions where
# we fall back to inferring just "None" as the type from a None initializer:
#
# 1. If all happens within a single function this is acceptable, since only
# the topmost function is a separate target in fine-grained incremental mode.
# We primarily want to avoid "splitting" partial types across targets.
#
# 2. A None initializer in the class body if the attribute is defined in a base
# class is fine, since the attribute is already defined and it's currently okay
# to vary the type of an attribute covariantly. The None type will still be
# checked for compatibility with base classes elsewhere. Without this exception
# mypy could require an annotation for an attribute that already has been
# declared in a base class, which would be bad.
allow_none = (
not self.options.local_partial_types
or is_function
or (is_class and self.is_defined_in_base_class(var))
)
if (
allow_none
and isinstance(var.type, PartialType)
and var.type.type is None
and not permissive
):
var.type = NoneType()
else:
if var not in self.partial_reported and not permissive:
self.msg.need_annotation_for_var(var, context, self.options.python_version)
self.partial_reported.add(var)
if var.type:
fixed = fixup_partial_type(var.type)
var.invalid_partial_type = fixed != var.type
var.type = fixed
def handle_partial_var_type(
self, typ: PartialType, is_lvalue: bool, node: Var, context: Context
) -> Type:
"""Handle a reference to a partial type through a var.
(Used by checkexpr and checkmember.)
"""
in_scope, is_local, partial_types = self.find_partial_types_in_all_scopes(node)
if typ.type is None and in_scope:
# 'None' partial type. It has a well-defined type. In an lvalue context
# we want to preserve the knowledge of it being a partial type.
if not is_lvalue:
return NoneType()
else:
return typ
else:
if partial_types is not None and not self.current_node_deferred:
if in_scope:
context = partial_types[node]
if is_local or not self.options.allow_untyped_globals:
self.msg.need_annotation_for_var(
node, context, self.options.python_version
)
self.partial_reported.add(node)
else:
# Defer the node -- we might get a better type in the outer scope
self.handle_cannot_determine_type(node.name, context)
return fixup_partial_type(typ)
def is_defined_in_base_class(self, var: Var) -> bool:
if not var.info:
return False
return var.info.fallback_to_any or any(
base.get(var.name) is not None for base in var.info.mro[1:]
)
def find_partial_types(self, var: Var) -> dict[Var, Context] | None:
"""Look for an active partial type scope containing variable.
A scope is active if assignments in the current context can refine a partial
type originally defined in the scope. This is affected by the local_partial_types
configuration option.
"""
in_scope, _, partial_types = self.find_partial_types_in_all_scopes(var)
if in_scope:
return partial_types
return None
def find_partial_types_in_all_scopes(
self, var: Var
) -> tuple[bool, bool, dict[Var, Context] | None]:
"""Look for partial type scope containing variable.
Return tuple (is the scope active, is the scope a local scope, scope).
"""
for scope in reversed(self.partial_types):
if var in scope.map:
                # All scopes within the outermost function are active. Scopes outside
                # the outermost function are inactive to allow local reasoning (important
# for fine-grained incremental mode).
disallow_other_scopes = self.options.local_partial_types
if isinstance(var.type, PartialType) and var.type.type is not None and var.info:
# This is an ugly hack to make partial generic self attributes behave
# as if --local-partial-types is always on (because it used to be like this).
disallow_other_scopes = True
scope_active = (
not disallow_other_scopes or scope.is_local == self.partial_types[-1].is_local
)
return scope_active, scope.is_local, scope.map
return False, False, None
def temp_node(self, t: Type, context: Context | None = None) -> TempNode:
"""Create a temporary node with the given, fixed type."""
return TempNode(t, context=context)
def fail(
self, msg: str | ErrorMessage, context: Context, *, code: ErrorCode | None = None
) -> None:
"""Produce an error message."""
if isinstance(msg, ErrorMessage):
self.msg.fail(msg.value, context, code=msg.code)
return
self.msg.fail(msg, context, code=code)
def note(
self,
msg: str | ErrorMessage,
context: Context,
offset: int = 0,
*,
code: ErrorCode | None = None,
) -> None:
"""Produce a note."""
if isinstance(msg, ErrorMessage):
self.msg.note(msg.value, context, code=msg.code)
return
self.msg.note(msg, context, offset=offset, code=code)
def iterable_item_type(
self, it: Instance | CallableType | TypeType | Overloaded, context: Context
) -> Type:
if isinstance(it, Instance):
iterable = map_instance_to_supertype(it, self.lookup_typeinfo("typing.Iterable"))
item_type = iterable.args[0]
if not isinstance(get_proper_type(item_type), AnyType):
# This relies on 'map_instance_to_supertype' returning 'Iterable[Any]'
# in case there is no explicit base class.
return item_type
# Try also structural typing.
return self.analyze_iterable_item_type_without_expression(it, context)[1]
def function_type(self, func: FuncBase) -> FunctionLike:
return function_type(func, self.named_type("builtins.function"))
def push_type_map(self, type_map: TypeMap) -> None:
if type_map is None:
self.binder.unreachable()
else:
for expr, type in type_map.items():
self.binder.put(expr, type)
def infer_issubclass_maps(self, node: CallExpr, expr: Expression) -> tuple[TypeMap, TypeMap]:
"""Infer type restrictions for an expression in issubclass call."""
vartype = self.lookup_type(expr)
type = self.get_isinstance_type(node.args[1])
if isinstance(vartype, TypeVarType):
vartype = vartype.upper_bound
vartype = get_proper_type(vartype)
if isinstance(vartype, UnionType):
union_list = []
for t in get_proper_types(vartype.items):
if isinstance(t, TypeType):
union_list.append(t.item)
else:
# This is an error that should be reported earlier
# if we reach here, we refuse to do any type inference.
return {}, {}
vartype = UnionType(union_list)
elif isinstance(vartype, TypeType):
vartype = vartype.item
elif isinstance(vartype, Instance) and vartype.type.is_metaclass():
vartype = self.named_type("builtins.object")
else:
# Any other object whose type we don't know precisely
# for example, Any or a custom metaclass.
return {}, {} # unknown type
yes_type, no_type = self.conditional_types_with_intersection(vartype, type, expr)
yes_map, no_map = conditional_types_to_typemaps(expr, yes_type, no_type)
yes_map, no_map = map(convert_to_typetype, (yes_map, no_map))
return yes_map, no_map
@overload
def conditional_types_with_intersection(
self,
expr_type: Type,
type_ranges: list[TypeRange] | None,
ctx: Context,
default: None = None,
) -> tuple[Type | None, Type | None]: ...
@overload
def conditional_types_with_intersection(
self, expr_type: Type, type_ranges: list[TypeRange] | None, ctx: Context, default: Type
) -> tuple[Type, Type]: ...
def conditional_types_with_intersection(
self,
expr_type: Type,
type_ranges: list[TypeRange] | None,
ctx: Context,
default: Type | None = None,
) -> tuple[Type | None, Type | None]:
initial_types = conditional_types(expr_type, type_ranges, default)
# For some reason, doing "yes_map, no_map = conditional_types_to_typemaps(...)"
# doesn't work: mypyc will decide that 'yes_map' is of type None if we try.
yes_type: Type | None = initial_types[0]
no_type: Type | None = initial_types[1]
if not isinstance(get_proper_type(yes_type), UninhabitedType) or type_ranges is None:
return yes_type, no_type
# If conditional_types was unable to successfully narrow the expr_type
# using the type_ranges and concluded if-branch is unreachable, we try
# computing it again using a different algorithm that tries to generate
# an ad-hoc intersection between the expr_type and the type_ranges.
proper_type = get_proper_type(expr_type)
if isinstance(proper_type, UnionType):
possible_expr_types = get_proper_types(proper_type.relevant_items())
else:
possible_expr_types = [proper_type]
possible_target_types = []
for tr in type_ranges:
item = get_proper_type(tr.item)
if isinstance(item, (Instance, NoneType)):
possible_target_types.append(item)
if not possible_target_types:
return yes_type, no_type
out = []
errors: list[tuple[str, str]] = []
for v in possible_expr_types:
if not isinstance(v, Instance):
return yes_type, no_type
for t in possible_target_types:
if isinstance(t, NoneType):
errors.append((f'"{v.type.name}" and "NoneType"', '"NoneType" is final'))
continue
intersection = self.intersect_instances((v, t), errors)
if intersection is None:
continue
out.append(intersection)
if not out:
# Only report errors if no element in the union worked.
if self.should_report_unreachable_issues():
for types, reason in errors:
self.msg.impossible_intersection(types, reason, ctx)
return UninhabitedType(), expr_type
new_yes_type = make_simplified_union(out)
return new_yes_type, expr_type
def is_writable_attribute(self, node: Node) -> bool:
"""Check if an attribute is writable"""
if isinstance(node, Var):
if node.is_property and not node.is_settable_property:
return False
return True
elif isinstance(node, OverloadedFuncDef) and node.is_property:
first_item = node.items[0]
assert isinstance(first_item, Decorator)
return first_item.var.is_settable_property
return False
def get_isinstance_type(self, expr: Expression) -> list[TypeRange] | None:
if isinstance(expr, OpExpr) and expr.op == "|":
left = self.get_isinstance_type(expr.left)
if left is None and is_literal_none(expr.left):
left = [TypeRange(NoneType(), is_upper_bound=False)]
right = self.get_isinstance_type(expr.right)
if right is None and is_literal_none(expr.right):
right = [TypeRange(NoneType(), is_upper_bound=False)]
if left is None or right is None:
return None
return left + right
all_types = get_proper_types(flatten_types(self.lookup_type(expr)))
types: list[TypeRange] = []
for typ in all_types:
if isinstance(typ, FunctionLike) and typ.is_type_obj():
# Type variables may be present -- erase them, which is the best
# we can do (outside disallowing them here).
erased_type = erase_typevars(typ.items[0].ret_type)
types.append(TypeRange(erased_type, is_upper_bound=False))
elif isinstance(typ, TypeType):
# Type[A] means "any type that is a subtype of A" rather than "precisely type A"
# we indicate this by setting is_upper_bound flag
is_upper_bound = True
if isinstance(typ.item, NoneType):
# except for Type[None], because "'NoneType' is not an acceptable base type"
is_upper_bound = False
types.append(TypeRange(typ.item, is_upper_bound=is_upper_bound))
elif isinstance(typ, Instance) and typ.type.fullname == "builtins.type":
object_type = Instance(typ.type.mro[-1], [])
types.append(TypeRange(object_type, is_upper_bound=True))
elif isinstance(typ, Instance) and typ.type.fullname == "types.UnionType" and typ.args:
types.append(TypeRange(UnionType(typ.args), is_upper_bound=False))
elif isinstance(typ, AnyType):
types.append(TypeRange(typ, is_upper_bound=False))
else: # we didn't see an actual type, but rather a variable with unknown value
return None
if not types:
        # this can happen if someone has an empty tuple as the 2nd argument to isinstance
# strictly speaking, we should return UninhabitedType but for simplicity we will simply
# refuse to do any type inference for now
return None
return types
def is_literal_enum(self, n: Expression) -> bool:
"""Returns true if this expression (with the given type context) is an Enum literal.
For example, if we had an enum:
class Foo(Enum):
A = 1
B = 2
...and if the expression 'Foo' referred to that enum within the current type context,
then the expression 'Foo.A' would be a literal enum. However, if we did 'a = Foo.A',
then the variable 'a' would *not* be a literal enum.
We occasionally special-case expressions like 'Foo.A' and treat them as a single primitive
unit for the same reasons we sometimes treat 'True', 'False', or 'None' as a single
primitive unit.
"""
if not isinstance(n, MemberExpr) or not isinstance(n.expr, NameExpr):
return False
parent_type = self.lookup_type_or_none(n.expr)
member_type = self.lookup_type_or_none(n)
if member_type is None or parent_type is None:
return False
parent_type = get_proper_type(parent_type)
member_type = get_proper_type(coerce_to_literal(member_type))
if not isinstance(parent_type, FunctionLike) or not isinstance(member_type, LiteralType):
return False
if not parent_type.is_type_obj():
return False
return (
member_type.is_enum_literal()
and member_type.fallback.type == parent_type.type_object()
)
def add_any_attribute_to_type(self, typ: Type, name: str) -> Type:
"""Inject an extra attribute with Any type using fallbacks."""
orig_typ = typ
typ = get_proper_type(typ)
any_type = AnyType(TypeOfAny.unannotated)
if isinstance(typ, Instance):
result = typ.copy_with_extra_attr(name, any_type)
# For instances, we erase the possible module name, so that restrictions
# become anonymous types.ModuleType instances, allowing hasattr() to
# have effect on modules.
assert result.extra_attrs is not None
result.extra_attrs.mod_name = None
return result
if isinstance(typ, TupleType):
fallback = typ.partial_fallback.copy_with_extra_attr(name, any_type)
return typ.copy_modified(fallback=fallback)
if isinstance(typ, CallableType):
fallback = typ.fallback.copy_with_extra_attr(name, any_type)
return typ.copy_modified(fallback=fallback)
if isinstance(typ, TypeType) and isinstance(typ.item, Instance):
return TypeType.make_normalized(self.add_any_attribute_to_type(typ.item, name))
if isinstance(typ, TypeVarType):
return typ.copy_modified(
upper_bound=self.add_any_attribute_to_type(typ.upper_bound, name),
values=[self.add_any_attribute_to_type(v, name) for v in typ.values],
)
if isinstance(typ, UnionType):
with_attr, without_attr = self.partition_union_by_attr(typ, name)
return make_simplified_union(
with_attr + [self.add_any_attribute_to_type(typ, name) for typ in without_attr]
)
return orig_typ
def hasattr_type_maps(
self, expr: Expression, source_type: Type, name: str
) -> tuple[TypeMap, TypeMap]:
"""Simple support for hasattr() checks.
        Essentially the logic is as follows:
        * In the if branch, keep types that already have a valid attribute as is;
          for the others, inject an attribute with `Any` type.
* In the else branch, remove types that already have a valid attribute,
while keeping the rest.
"""
if self.has_valid_attribute(source_type, name):
return {expr: source_type}, {}
source_type = get_proper_type(source_type)
if isinstance(source_type, UnionType):
_, without_attr = self.partition_union_by_attr(source_type, name)
yes_map = {expr: self.add_any_attribute_to_type(source_type, name)}
return yes_map, {expr: make_simplified_union(without_attr)}
type_with_attr = self.add_any_attribute_to_type(source_type, name)
if type_with_attr != source_type:
return {expr: type_with_attr}, {}
return {}, {}
def partition_union_by_attr(
self, source_type: UnionType, name: str
) -> tuple[list[Type], list[Type]]:
with_attr = []
without_attr = []
for item in source_type.items:
if self.has_valid_attribute(item, name):
with_attr.append(item)
else:
without_attr.append(item)
return with_attr, without_attr
def has_valid_attribute(self, typ: Type, name: str) -> bool:
p_typ = get_proper_type(typ)
if isinstance(p_typ, AnyType):
return False
if isinstance(p_typ, Instance) and p_typ.extra_attrs and p_typ.extra_attrs.mod_name:
# Presence of module_symbol_table means this check will skip ModuleType.__getattr__
module_symbol_table = p_typ.type.names
else:
module_symbol_table = None
with self.msg.filter_errors() as watcher:
analyze_member_access(
name,
typ,
TempNode(AnyType(TypeOfAny.special_form)),
is_lvalue=False,
is_super=False,
is_operator=False,
msg=self.msg,
original_type=typ,
chk=self,
# This is not a real attribute lookup so don't mess with deferring nodes.
no_deferral=True,
module_symbol_table=module_symbol_table,
)
return not watcher.has_new_errors()
def get_expression_type(self, node: Expression, type_context: Type | None = None) -> Type:
return self.expr_checker.accept(node, type_context=type_context)
class CollectArgTypeVarTypes(TypeTraverserVisitor):
"""Collects the non-nested argument types in a set."""
def __init__(self) -> None:
self.arg_types: set[TypeVarType] = set()
def visit_type_var(self, t: TypeVarType) -> None:
self.arg_types.add(t)
@overload
def conditional_types(
current_type: Type, proposed_type_ranges: list[TypeRange] | None, default: None = None
) -> tuple[Type | None, Type | None]: ...
@overload
def conditional_types(
current_type: Type, proposed_type_ranges: list[TypeRange] | None, default: Type
) -> tuple[Type, Type]: ...
def conditional_types(
current_type: Type, proposed_type_ranges: list[TypeRange] | None, default: Type | None = None
) -> tuple[Type | None, Type | None]:
"""Takes in the current type and a proposed type of an expression.
Returns a 2-tuple: The first element is the proposed type, if the expression
can be the proposed type. The second element is the type it would hold
if it was not the proposed type, if any. UninhabitedType means unreachable.
None means no new information can be inferred. If default is set it is returned
instead."""
if proposed_type_ranges:
if len(proposed_type_ranges) == 1:
target = proposed_type_ranges[0].item
target = get_proper_type(target)
if isinstance(target, LiteralType) and (
target.is_enum_literal() or isinstance(target.value, bool)
):
enum_name = target.fallback.type.fullname
current_type = try_expanding_sum_type_to_union(current_type, enum_name)
proposed_items = [type_range.item for type_range in proposed_type_ranges]
proposed_type = make_simplified_union(proposed_items)
if isinstance(proposed_type, AnyType):
# We don't really know much about the proposed type, so we shouldn't
# attempt to narrow anything. Instead, we broaden the expr to Any to
# avoid false positives
return proposed_type, default
elif not any(
type_range.is_upper_bound for type_range in proposed_type_ranges
) and is_proper_subtype(current_type, proposed_type, ignore_promotions=True):
# Expression is always of one of the types in proposed_type_ranges
return default, UninhabitedType()
elif not is_overlapping_types(
current_type, proposed_type, prohibit_none_typevar_overlap=True, ignore_promotions=True
):
# Expression is never of any type in proposed_type_ranges
return UninhabitedType(), default
else:
# we can only restrict when the type is precise, not bounded
proposed_precise_type = UnionType.make_union(
[
type_range.item
for type_range in proposed_type_ranges
if not type_range.is_upper_bound
]
)
remaining_type = restrict_subtype_away(current_type, proposed_precise_type)
return proposed_type, remaining_type
else:
# An isinstance check, but we don't understand the type
return current_type, default
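# Illustrative sketch (not part of the original mypy source): the helper below shows the
# return convention documented above, using only types that can be built without a TypeInfo.
# Narrowing None by an exact (non-upper-bound) range of None always succeeds, so the "yes"
# result is None (no information beyond the current type) and the "no" result is
# UninhabitedType (the negative branch is unreachable).
def _conditional_types_none_sketch() -> tuple[Type | None, Type | None]:
    # Expected result: (None, UninhabitedType()).
    return conditional_types(NoneType(), [TypeRange(NoneType(), is_upper_bound=False)])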
def conditional_types_to_typemaps(
expr: Expression, yes_type: Type | None, no_type: Type | None
) -> tuple[TypeMap, TypeMap]:
expr = collapse_walrus(expr)
maps: list[TypeMap] = []
for typ in (yes_type, no_type):
proper_type = get_proper_type(typ)
if isinstance(proper_type, UninhabitedType):
maps.append(None)
elif proper_type is None:
maps.append({})
else:
assert typ is not None
maps.append({expr: typ})
return cast(Tuple[TypeMap, TypeMap], tuple(maps))
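# Illustrative sketch (not part of the original mypy source): an UninhabitedType result maps
# to a None TypeMap (meaning that branch is unreachable), while any other concrete result
# becomes a single-entry map for the (walrus-collapsed) expression.
def _conditional_types_to_typemaps_sketch() -> tuple[TypeMap, TypeMap]:
    expr = NameExpr("x")
    # Expected result: (None, {expr: AnyType(...)}).
    return conditional_types_to_typemaps(expr, UninhabitedType(), AnyType(TypeOfAny.special_form))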
def gen_unique_name(base: str, table: SymbolTable) -> str:
"""Generate a name that does not appear in table by appending numbers to base."""
if base not in table:
return base
i = 1
while base + str(i) in table:
i += 1
return base + str(i)
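# Illustrative sketch (not part of the original mypy source): a plain dict stands in for a
# SymbolTable here, since gen_unique_name only performs membership tests on it.
def _gen_unique_name_sketch() -> str:
    table = {"x": None, "x1": None}
    # "x" and "x1" are taken, so the first free candidate is "x2".
    return gen_unique_name("x", table)  # type: ignore[arg-type]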
def is_true_literal(n: Expression) -> bool:
"""Returns true if this expression is the 'True' literal/keyword."""
return refers_to_fullname(n, "builtins.True") or isinstance(n, IntExpr) and n.value != 0
def is_false_literal(n: Expression) -> bool:
"""Returns true if this expression is the 'False' literal/keyword."""
return refers_to_fullname(n, "builtins.False") or isinstance(n, IntExpr) and n.value == 0
def is_literal_none(n: Expression) -> bool:
"""Returns true if this expression is the 'None' literal/keyword."""
return isinstance(n, NameExpr) and n.fullname == "builtins.None"
def is_literal_not_implemented(n: Expression) -> bool:
return isinstance(n, NameExpr) and n.fullname == "builtins.NotImplemented"
def _is_empty_generator_function(func: FuncItem) -> bool:
"""
Checks whether a function's body is 'return; yield' (the yield being added only
to promote the function into a generator function).
"""
body = func.body.body
return (
len(body) == 2
and isinstance(ret_stmt := body[0], ReturnStmt)
and (ret_stmt.expr is None or is_literal_none(ret_stmt.expr))
and isinstance(expr_stmt := body[1], ExpressionStmt)
and isinstance(yield_expr := expr_stmt.expr, YieldExpr)
and (yield_expr.expr is None or is_literal_none(yield_expr.expr))
)
def builtin_item_type(tp: Type) -> Type | None:
"""Get the item type of a builtin container.
    If 'tp' is not one of the built-in containers (these include NamedTuple and TypedDict)
or if the container is not parameterized (like List or List[Any])
return None. This function is used to narrow optional types in situations like this:
x: Optional[int]
if x in (1, 2, 3):
x + 42 # OK
Note: this is only OK for built-in containers, where we know the behavior
of __contains__.
"""
tp = get_proper_type(tp)
if isinstance(tp, Instance):
if tp.type.fullname in [
"builtins.list",
"builtins.tuple",
"builtins.dict",
"builtins.set",
"builtins.frozenset",
"_collections_abc.dict_keys",
"typing.KeysView",
]:
if not tp.args:
# TODO: fix tuple in lib-stub/builtins.pyi (it should be generic).
return None
if not isinstance(get_proper_type(tp.args[0]), AnyType):
return tp.args[0]
elif isinstance(tp, TupleType):
normalized_items = []
for it in tp.items:
# This use case is probably rare, but not handling unpacks here can cause crashes.
if isinstance(it, UnpackType):
unpacked = get_proper_type(it.type)
if isinstance(unpacked, TypeVarTupleType):
unpacked = get_proper_type(unpacked.upper_bound)
assert (
isinstance(unpacked, Instance) and unpacked.type.fullname == "builtins.tuple"
)
normalized_items.append(unpacked.args[0])
else:
normalized_items.append(it)
if all(not isinstance(it, AnyType) for it in get_proper_types(normalized_items)):
return make_simplified_union(normalized_items) # this type is not externally visible
elif isinstance(tp, TypedDictType):
# TypedDict always has non-optional string keys. Find the key type from the Mapping
# base class.
for base in tp.fallback.type.mro:
if base.fullname == "typing.Mapping":
return map_instance_to_supertype(tp.fallback, base).args[0]
assert False, "No Mapping base class found for TypedDict fallback"
return None
def and_conditional_maps(m1: TypeMap, m2: TypeMap, use_meet: bool = False) -> TypeMap:
"""Calculate what information we can learn from the truth of (e1 and e2)
in terms of the information that we can learn from the truth of e1 and
the truth of e2.
"""
if m1 is None or m2 is None:
# One of the conditions can never be true.
return None
# Both conditions can be true; combine the information. Anything
# we learn from either conditions' truth is valid. If the same
# expression's type is refined by both conditions, we somewhat
    # arbitrarily give precedence to m2 unless the value in m1 is Any.
# In the future, we could use an intersection type or meet_types().
result = m2.copy()
m2_keys = {literal_hash(n2) for n2 in m2}
for n1 in m1:
if literal_hash(n1) not in m2_keys or isinstance(get_proper_type(m1[n1]), AnyType):
result[n1] = m1[n1]
if use_meet:
# For now, meet common keys only if specifically requested.
# This is currently used for tuple types narrowing, where having
# a precise result is important.
for n1 in m1:
for n2 in m2:
if literal_hash(n1) == literal_hash(n2):
result[n1] = meet_types(m1[n1], m2[n2])
return result
def or_conditional_maps(m1: TypeMap, m2: TypeMap, coalesce_any: bool = False) -> TypeMap:
"""Calculate what information we can learn from the truth of (e1 or e2)
in terms of the information that we can learn from the truth of e1 and
the truth of e2. If coalesce_any is True, consider Any a supertype when
joining restrictions.
"""
if m1 is None:
return m2
if m2 is None:
return m1
# Both conditions can be true. Combine information about
# expressions whose type is refined by both conditions. (We do not
# learn anything about expressions whose type is refined by only
# one condition.)
result: dict[Expression, Type] = {}
for n1 in m1:
for n2 in m2:
if literal_hash(n1) == literal_hash(n2):
if coalesce_any and isinstance(get_proper_type(m1[n1]), AnyType):
result[n1] = m1[n1]
else:
result[n1] = make_simplified_union([m1[n1], m2[n2]])
return result
def reduce_conditional_maps(
type_maps: list[tuple[TypeMap, TypeMap]], use_meet: bool = False
) -> tuple[TypeMap, TypeMap]:
"""Reduces a list containing pairs of if/else TypeMaps into a single pair.
We "and" together all of the if TypeMaps and "or" together the else TypeMaps. So
for example, if we had the input:
[
({x: TypeIfX, shared: TypeIfShared1}, {x: TypeElseX, shared: TypeElseShared1}),
({y: TypeIfY, shared: TypeIfShared2}, {y: TypeElseY, shared: TypeElseShared2}),
]
...we'd return the output:
(
{x: TypeIfX, y: TypeIfY, shared: PseudoIntersection[TypeIfShared1, TypeIfShared2]},
{shared: Union[TypeElseShared1, TypeElseShared2]},
)
...where "PseudoIntersection[X, Y] == Y" because mypy actually doesn't understand intersections
yet, so we settle for just arbitrarily picking the right expr's type.
We only retain the shared expression in the 'else' case because we don't actually know
whether x was refined or y was refined -- only just that one of the two was refined.
"""
if len(type_maps) == 0:
return {}, {}
elif len(type_maps) == 1:
return type_maps[0]
else:
final_if_map, final_else_map = type_maps[0]
for if_map, else_map in type_maps[1:]:
final_if_map = and_conditional_maps(final_if_map, if_map, use_meet=use_meet)
final_else_map = or_conditional_maps(final_else_map, else_map)
return final_if_map, final_else_map
def convert_to_typetype(type_map: TypeMap) -> TypeMap:
converted_type_map: dict[Expression, Type] = {}
if type_map is None:
return None
for expr, typ in type_map.items():
t = typ
if isinstance(t, TypeVarType):
t = t.upper_bound
# TODO: should we only allow unions of instances as per PEP 484?
if not isinstance(get_proper_type(t), (UnionType, Instance, NoneType)):
# unknown type; error was likely reported earlier
return {}
converted_type_map[expr] = TypeType.make_normalized(typ)
return converted_type_map
def flatten(t: Expression) -> list[Expression]:
"""Flatten a nested sequence of tuples/lists into one list of nodes."""
if isinstance(t, (TupleExpr, ListExpr)):
return [b for a in t.items for b in flatten(a)]
elif isinstance(t, StarExpr):
return flatten(t.expr)
else:
return [t]
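# Illustrative sketch (not part of the original mypy source): nested tuple/list expressions
# are flattened into a single list of leaf expressions, and starred expressions are unwrapped.
def _flatten_sketch() -> list[Expression]:
    nested = TupleExpr([NameExpr("a"), ListExpr([NameExpr("b"), StarExpr(NameExpr("c"))])])
    # Expected result: the NameExpr nodes for "a", "b" and "c", in order.
    return flatten(nested)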
def flatten_types(t: Type) -> list[Type]:
"""Flatten a nested sequence of tuples into one list of nodes."""
t = get_proper_type(t)
if isinstance(t, TupleType):
return [b for a in t.items for b in flatten_types(a)]
elif is_named_instance(t, "builtins.tuple"):
return [t.args[0]]
else:
return [t]
def expand_func(defn: FuncItem, map: dict[TypeVarId, Type]) -> FuncItem:
visitor = TypeTransformVisitor(map)
ret = visitor.node(defn)
assert isinstance(ret, FuncItem)
return ret
class TypeTransformVisitor(TransformVisitor):
def __init__(self, map: dict[TypeVarId, Type]) -> None:
super().__init__()
self.map = map
def type(self, type: Type) -> Type:
return expand_type(type, self.map)
def are_argument_counts_overlapping(t: CallableType, s: CallableType) -> bool:
"""Can a single call match both t and s, based just on positional argument counts?"""
min_args = max(t.min_args, s.min_args)
max_args = min(t.max_possible_positional_args(), s.max_possible_positional_args())
return min_args <= max_args
def expand_callable_variants(c: CallableType) -> list[CallableType]:
"""Expand a generic callable using all combinations of type variables' values/bounds."""
for tv in c.variables:
# We need to expand self-type before other variables, because this is the only
# type variable that can have other type variables in the upper bound.
if tv.id.is_self():
c = expand_type(c, {tv.id: tv.upper_bound}).copy_modified(
variables=[v for v in c.variables if not v.id.is_self()]
)
break
if not c.is_generic():
# Fast path.
return [c]
tvar_values = []
for tvar in c.variables:
if isinstance(tvar, TypeVarType) and tvar.values:
tvar_values.append(tvar.values)
else:
tvar_values.append([tvar.upper_bound])
variants = []
for combination in itertools.product(*tvar_values):
tvar_map = {tv.id: subst for (tv, subst) in zip(c.variables, combination)}
variants.append(expand_type(c, tvar_map).copy_modified(variables=[]))
return variants
def is_unsafe_overlapping_overload_signatures(
signature: CallableType,
other: CallableType,
class_type_vars: list[TypeVarLikeType],
partial_only: bool = True,
) -> bool:
"""Check if two overloaded signatures are unsafely overlapping or partially overlapping.
We consider two functions 's' and 't' to be unsafely overlapping if three
conditions hold:
1. s's parameters are partially overlapping with t's. i.e. there are calls that are
valid for both signatures.
    2. for these common calls, some of t's parameter types are wider than s's.
3. s's return type is NOT a subset of t's.
Note that we use subset rather than subtype relationship in these checks because:
* Overload selection happens at runtime, not statically.
* This results in more lenient behavior.
    This can cause false negatives (e.g. if an overloaded function returns an externally
    visible attribute with invariant type), but such situations are rare. In general,
    overloads in Python are unsafe, so we intentionally try to avoid giving
non-actionable errors (see more details in comments below).
Assumes that 'signature' appears earlier in the list of overload
    alternatives than 'other' and that their argument counts are overlapping.
"""
# Try detaching callables from the containing class so that all TypeVars
# are treated as being free, i.e. the signature is as seen from inside the class,
# where "self" is not yet bound to anything.
signature = detach_callable(signature, class_type_vars)
other = detach_callable(other, class_type_vars)
    # Note: We repeat this check in both directions to compensate for slight
# asymmetries in 'is_callable_compatible'.
for sig_variant in expand_callable_variants(signature):
for other_variant in expand_callable_variants(other):
# Using only expanded callables may cause false negatives, we can add
# more variants (e.g. using inference between callables) in the future.
if is_subset_no_promote(sig_variant.ret_type, other_variant.ret_type):
continue
if not (
is_callable_compatible(
sig_variant,
other_variant,
is_compat=is_overlapping_types_for_overload,
check_args_covariantly=False,
is_proper_subtype=False,
is_compat_return=lambda l, r: not is_subset_no_promote(l, r),
allow_partial_overlap=True,
)
or is_callable_compatible(
other_variant,
sig_variant,
is_compat=is_overlapping_types_for_overload,
check_args_covariantly=True,
is_proper_subtype=False,
is_compat_return=lambda l, r: not is_subset_no_promote(r, l),
allow_partial_overlap=True,
)
):
continue
# Using the same `allow_partial_overlap` flag as before, can cause false
# negatives in case where star argument is used in a catch-all fallback overload.
# But again, practicality beats purity here.
if not partial_only or not is_callable_compatible(
other_variant,
sig_variant,
is_compat=is_subset_no_promote,
check_args_covariantly=True,
is_proper_subtype=False,
ignore_return=True,
allow_partial_overlap=True,
):
return True
return False
def detach_callable(typ: CallableType, class_type_vars: list[TypeVarLikeType]) -> CallableType:
"""Ensures that the callable's type variables are 'detached' and independent of the context.
A callable normally keeps track of the type variables it uses within its 'variables' field.
However, if the callable is from a method and that method is using a class type variable,
the callable will not keep track of that type variable since it belongs to the class.
"""
if not class_type_vars:
# Fast path, nothing to update.
return typ
return typ.copy_modified(variables=list(typ.variables) + class_type_vars)
def overload_can_never_match(signature: CallableType, other: CallableType) -> bool:
"""Check if the 'other' method can never be matched due to 'signature'.
    This can happen if signature's parameters are all strictly broader than
other's parameters.
Assumes that both signatures have overlapping argument counts.
"""
# The extra erasure is needed to prevent spurious errors
# in situations where an `Any` overload is used as a fallback
# for an overload with type variables. The spurious error appears
# because the type variables turn into `Any` during unification in
# the below subtype check and (surprisingly?) `is_proper_subtype(Any, Any)`
# returns `True`.
# TODO: find a cleaner solution instead of this ad-hoc erasure.
exp_signature = expand_type(
signature, {tvar.id: erase_def_to_union_or_bound(tvar) for tvar in signature.variables}
)
return is_callable_compatible(
exp_signature, other, is_compat=is_more_precise, is_proper_subtype=True, ignore_return=True
)
def is_more_general_arg_prefix(t: FunctionLike, s: FunctionLike) -> bool:
"""Does t have wider arguments than s?"""
# TODO should an overload with additional items be allowed to be more
# general than one with fewer items (or just one item)?
if isinstance(t, CallableType):
if isinstance(s, CallableType):
return is_callable_compatible(
t, s, is_compat=is_proper_subtype, is_proper_subtype=True, ignore_return=True
)
elif isinstance(t, FunctionLike):
if isinstance(s, FunctionLike):
if len(t.items) == len(s.items):
return all(
is_same_arg_prefix(items, itemt) for items, itemt in zip(t.items, s.items)
)
return False
def is_same_arg_prefix(t: CallableType, s: CallableType) -> bool:
return is_callable_compatible(
t,
s,
is_compat=is_same_type,
is_proper_subtype=True,
ignore_return=True,
check_args_covariantly=True,
ignore_pos_arg_names=True,
)
def infer_operator_assignment_method(typ: Type, operator: str) -> tuple[bool, str]:
"""Determine if operator assignment on given value type is in-place, and the method name.
For example, if operator is '+', return (True, '__iadd__') or (False, '__add__')
depending on which method is supported by the type.
"""
typ = get_proper_type(typ)
method = operators.op_methods[operator]
existing_method = None
if isinstance(typ, Instance):
existing_method = _find_inplace_method(typ, method, operator)
elif isinstance(typ, TypedDictType):
existing_method = _find_inplace_method(typ.fallback, method, operator)
if existing_method is not None:
return True, existing_method
return False, method
def _find_inplace_method(inst: Instance, method: str, operator: str) -> str | None:
if operator in operators.ops_with_inplace_method:
inplace_method = "__i" + method[2:]
if inst.type.has_readable_member(inplace_method):
return inplace_method
return None
def is_valid_inferred_type(typ: Type, is_lvalue_final: bool = False) -> bool:
"""Is an inferred type valid and needs no further refinement?
Examples of invalid types include the None type (when we are not assigning
None to a final lvalue) or List[<uninhabited>].
When not doing strict Optional checking, all types containing None are
invalid. When doing strict Optional checking, only None and types that are
incompletely defined (i.e. contain UninhabitedType) are invalid.
"""
proper_type = get_proper_type(typ)
if isinstance(proper_type, NoneType):
# If the lvalue is final, we may immediately infer NoneType when the
# initializer is None.
#
# If not, we want to defer making this decision. The final inferred
# type could either be NoneType or an Optional type, depending on
# the context. This resolution happens in leave_partial_types when
# we pop a partial types scope.
return is_lvalue_final
elif isinstance(proper_type, UninhabitedType):
return False
return not typ.accept(InvalidInferredTypes())
class InvalidInferredTypes(BoolTypeQuery):
"""Find type components that are not valid for an inferred type.
These include <Erased> type, and any uninhabited types resulting from failed
(ambiguous) type inference.
"""
def __init__(self) -> None:
super().__init__(ANY_STRATEGY)
def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
return t.ambiguous
def visit_erased_type(self, t: ErasedType) -> bool:
# This can happen inside a lambda.
return True
def visit_type_var(self, t: TypeVarType) -> bool:
# This is needed to prevent leaking into partial types during
# multi-step type inference.
return t.id.is_meta_var()
class SetNothingToAny(TypeTranslator):
"""Replace all ambiguous Uninhabited types with Any (to avoid spurious extra errors)."""
def visit_uninhabited_type(self, t: UninhabitedType) -> Type:
if t.ambiguous:
return AnyType(TypeOfAny.from_error)
return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
# Target of the alias cannot be an ambiguous UninhabitedType, so we just
# replace the arguments.
return t.copy_modified(args=[a.accept(self) for a in t.args])
def is_node_static(node: Node | None) -> bool | None:
    """Find out if a node describes a static function or method."""
if isinstance(node, FuncDef):
return node.is_static
if isinstance(node, Var):
return node.is_staticmethod
return None
class CheckerScope:
# We keep two stacks combined, to maintain the relative order
stack: list[TypeInfo | FuncItem | MypyFile]
def __init__(self, module: MypyFile) -> None:
self.stack = [module]
def top_function(self) -> FuncItem | None:
for e in reversed(self.stack):
if isinstance(e, FuncItem):
return e
return None
def top_non_lambda_function(self) -> FuncItem | None:
for e in reversed(self.stack):
if isinstance(e, FuncItem) and not isinstance(e, LambdaExpr):
return e
return None
def active_class(self) -> TypeInfo | None:
if isinstance(self.stack[-1], TypeInfo):
return self.stack[-1]
return None
def enclosing_class(self) -> TypeInfo | None:
"""Is there a class *directly* enclosing this function?"""
top = self.top_function()
assert top, "This method must be called from inside a function"
index = self.stack.index(top)
assert index, "CheckerScope stack must always start with a module"
enclosing = self.stack[index - 1]
if isinstance(enclosing, TypeInfo):
return enclosing
return None
def active_self_type(self) -> Instance | TupleType | None:
"""An instance or tuple type representing the current class.
This returns None unless we are in class body or in a method.
In particular, inside a function nested in method this returns None.
"""
info = self.active_class()
if not info and self.top_function():
info = self.enclosing_class()
if info:
return fill_typevars(info)
return None
@contextmanager
def push_function(self, item: FuncItem) -> Iterator[None]:
self.stack.append(item)
yield
self.stack.pop()
@contextmanager
def push_class(self, info: TypeInfo) -> Iterator[None]:
self.stack.append(info)
yield
self.stack.pop()
TKey = TypeVar("TKey")
TValue = TypeVar("TValue")
class DisjointDict(Generic[TKey, TValue]):
    """A variation of the union-find algorithm/data structure where instead of keeping
track of just disjoint sets, we keep track of disjoint dicts -- keep track of multiple
Set[Key] -> Set[Value] mappings, where each mapping's keys are guaranteed to be disjoint.
This data structure is currently used exclusively by 'group_comparison_operands' below
to merge chains of '==' and 'is' comparisons when two or more chains use the same expression
in best-case O(n), where n is the number of operands.
    Specifically, the `add_mapping()` and `items()` functions will take on average
O(k + v) and O(n) respectively, where k and v are the number of keys and values we're adding
for a given chain. Note that k <= n and v <= n.
We hit these average/best-case scenarios for most user code: e.g. when the user has just
a single chain like 'a == b == c == d == ...' or multiple disjoint chains like
'a==b < c==d < e==f < ...'. (Note that a naive iterative merging would be O(n^2) for
the latter case).
In comparison, this data structure will make 'group_comparison_operands' have a worst-case
runtime of O(n*log(n)): 'add_mapping()' and 'items()' are worst-case O(k*log(n) + v) and
O(k*log(n)) respectively. This happens only in the rare case where the user keeps repeatedly
making disjoint mappings before merging them in a way that persistently dodges the path
compression optimization in '_lookup_root_id', which would end up constructing a single
    tree of height log_2(n). This makes root lookups no longer amortized constant time when we
finally call 'items()'.
"""
def __init__(self) -> None:
# Each key maps to a unique ID
self._key_to_id: dict[TKey, int] = {}
# Each id points to the parent id, forming a forest of upwards-pointing trees. If the
# current id already is the root, it points to itself. We gradually flatten these trees
        # as we perform root lookups: eventually all nodes point directly to their root.
self._id_to_parent_id: dict[int, int] = {}
# Each root id in turn maps to the set of values.
self._root_id_to_values: dict[int, set[TValue]] = {}
def add_mapping(self, keys: set[TKey], values: set[TValue]) -> None:
"""Adds a 'Set[TKey] -> Set[TValue]' mapping. If there already exists a mapping
containing one or more of the given keys, we merge the input mapping with the old one.
Note that the given set of keys must be non-empty -- otherwise, nothing happens.
"""
if not keys:
return
subtree_roots = [self._lookup_or_make_root_id(key) for key in keys]
new_root = subtree_roots[0]
root_values = self._root_id_to_values[new_root]
root_values.update(values)
for subtree_root in subtree_roots[1:]:
if subtree_root == new_root or subtree_root not in self._root_id_to_values:
continue
self._id_to_parent_id[subtree_root] = new_root
root_values.update(self._root_id_to_values.pop(subtree_root))
def items(self) -> list[tuple[set[TKey], set[TValue]]]:
"""Returns all disjoint mappings in key-value pairs."""
root_id_to_keys: dict[int, set[TKey]] = {}
for key in self._key_to_id:
root_id = self._lookup_root_id(key)
if root_id not in root_id_to_keys:
root_id_to_keys[root_id] = set()
root_id_to_keys[root_id].add(key)
output = []
for root_id, keys in root_id_to_keys.items():
output.append((keys, self._root_id_to_values[root_id]))
return output
def _lookup_or_make_root_id(self, key: TKey) -> int:
if key in self._key_to_id:
return self._lookup_root_id(key)
else:
new_id = len(self._key_to_id)
self._key_to_id[key] = new_id
self._id_to_parent_id[new_id] = new_id
self._root_id_to_values[new_id] = set()
return new_id
def _lookup_root_id(self, key: TKey) -> int:
i = self._key_to_id[key]
while i != self._id_to_parent_id[i]:
# Optimization: make keys directly point to their grandparents to speed up
# future traversals. This prevents degenerate trees of height n from forming.
new_parent = self._id_to_parent_id[self._id_to_parent_id[i]]
self._id_to_parent_id[i] = new_parent
i = new_parent
return i
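# Illustrative usage sketch (not part of the original mypy source): mappings that share a key
# are merged into one disjoint entry; unrelated mappings stay separate.
def _disjoint_dict_sketch() -> list[tuple[set[str], set[int]]]:
    d: DisjointDict[str, int] = DisjointDict()
    d.add_mapping({"a", "b"}, {0, 1})
    d.add_mapping({"b", "c"}, {2})  # shares "b", so it merges with the first mapping
    d.add_mapping({"x"}, {9})
    # Expected result (in some order): [({"a", "b", "c"}, {0, 1, 2}), ({"x"}, {9})].
    return d.items()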
def group_comparison_operands(
pairwise_comparisons: Iterable[tuple[str, Expression, Expression]],
operand_to_literal_hash: Mapping[int, Key],
operators_to_group: set[str],
) -> list[tuple[str, list[int]]]:
    """Group a series of comparison operands together chained by any operator
in the 'operators_to_group' set. All other pairwise operands are kept in
groups of size 2.
For example, suppose we have the input comparison expression:
x0 == x1 == x2 < x3 < x4 is x5 is x6 is not x7 is not x8
    If we get these expressions in a pairwise way (e.g. by calling ComparisonExpr's
'pairwise()' method), we get the following as input:
[('==', x0, x1), ('==', x1, x2), ('<', x2, x3), ('<', x3, x4),
('is', x4, x5), ('is', x5, x6), ('is not', x6, x7), ('is not', x7, x8)]
If `operators_to_group` is the set {'==', 'is'}, this function will produce
the following "simplified operator list":
[("==", [0, 1, 2]), ("<", [2, 3]), ("<", [3, 4]),
("is", [4, 5, 6]), ("is not", [6, 7]), ("is not", [7, 8])]
    Note that (a) we yield *indices* to the operands rather than the operand
expressions themselves and that (b) operands used in a consecutive chain
of '==' or 'is' are grouped together.
If two of these chains happen to contain operands with the same underlying
literal hash (e.g. are assignable and correspond to the same expression),
we combine those chains together. For example, if we had:
same == x < y == same
...and if 'operand_to_literal_hash' contained the same values for the indices
0 and 3, we'd produce the following output:
[("==", [0, 1, 2, 3]), ("<", [1, 2])]
But if the 'operand_to_literal_hash' did *not* contain an entry, we'd instead
default to returning:
[("==", [0, 1]), ("<", [1, 2]), ("==", [2, 3])]
This function is currently only used to assist with type-narrowing refinements
and is extracted out to a helper function so we can unit test it.
"""
groups: dict[str, DisjointDict[Key, int]] = {op: DisjointDict() for op in operators_to_group}
simplified_operator_list: list[tuple[str, list[int]]] = []
last_operator: str | None = None
current_indices: set[int] = set()
current_hashes: set[Key] = set()
for i, (operator, left_expr, right_expr) in enumerate(pairwise_comparisons):
if last_operator is None:
last_operator = operator
if current_indices and (operator != last_operator or operator not in operators_to_group):
# If some of the operands in the chain are assignable, defer adding it: we might
# end up needing to merge it with other chains that appear later.
if not current_hashes:
simplified_operator_list.append((last_operator, sorted(current_indices)))
else:
groups[last_operator].add_mapping(current_hashes, current_indices)
last_operator = operator
current_indices = set()
current_hashes = set()
# Note: 'i' corresponds to the left operand index, so 'i + 1' is the
# right operand.
current_indices.add(i)
current_indices.add(i + 1)
# We only ever want to combine operands/combine chains for these operators
if operator in operators_to_group:
left_hash = operand_to_literal_hash.get(i)
if left_hash is not None:
current_hashes.add(left_hash)
right_hash = operand_to_literal_hash.get(i + 1)
if right_hash is not None:
current_hashes.add(right_hash)
if last_operator is not None:
if not current_hashes:
simplified_operator_list.append((last_operator, sorted(current_indices)))
else:
groups[last_operator].add_mapping(current_hashes, current_indices)
# Now that we know which chains happen to contain the same underlying expressions
# and can be merged together, add in this info back to the output.
for operator, disjoint_dict in groups.items():
for keys, indices in disjoint_dict.items():
simplified_operator_list.append((operator, sorted(indices)))
# For stability, reorder list by the first operand index to appear
simplified_operator_list.sort(key=lambda item: item[1][0])
return simplified_operator_list
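# Illustrative sketch (not part of the original mypy source): it mirrors the docstring example
# "x0 == x1 == x2 < x3"; the Expression operands are placeholders because this function only
# inspects the operator strings and operand indices.
def _group_comparison_operands_sketch() -> list[tuple[str, list[int]]]:
    pairwise = [
        ("==", NameExpr("x0"), NameExpr("x1")),
        ("==", NameExpr("x1"), NameExpr("x2")),
        ("<", NameExpr("x2"), NameExpr("x3")),
    ]
    # Expected result: [("==", [0, 1, 2]), ("<", [2, 3])].
    return group_comparison_operands(pairwise, {}, {"==", "is"})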
def is_typed_callable(c: Type | None) -> bool:
c = get_proper_type(c)
if not c or not isinstance(c, CallableType):
return False
return not all(
isinstance(t, AnyType) and t.type_of_any == TypeOfAny.unannotated
for t in get_proper_types(c.arg_types + [c.ret_type])
)
def is_untyped_decorator(typ: Type | None) -> bool:
typ = get_proper_type(typ)
if not typ:
return True
elif isinstance(typ, CallableType):
return not is_typed_callable(typ)
elif isinstance(typ, Instance):
method = typ.type.get_method("__call__")
if method:
if isinstance(method, Decorator):
return is_untyped_decorator(method.func.type) or is_untyped_decorator(
method.var.type
)
if isinstance(method.type, Overloaded):
return any(is_untyped_decorator(item) for item in method.type.items)
else:
return not is_typed_callable(method.type)
else:
return False
elif isinstance(typ, Overloaded):
return any(is_untyped_decorator(item) for item in typ.items)
return True
def is_static(func: FuncBase | Decorator) -> bool:
if isinstance(func, Decorator):
return is_static(func.func)
elif isinstance(func, FuncBase):
return func.is_static
assert False, f"Unexpected func type: {type(func)}"
def is_property(defn: SymbolNode) -> bool:
if isinstance(defn, Decorator):
return defn.func.is_property
if isinstance(defn, OverloadedFuncDef):
if defn.items and isinstance(defn.items[0], Decorator):
return defn.items[0].func.is_property
return False
def get_property_type(t: ProperType) -> ProperType:
if isinstance(t, CallableType):
return get_proper_type(t.ret_type)
if isinstance(t, Overloaded):
return get_proper_type(t.items[0].ret_type)
return t
def is_subset_no_promote(left: Type, right: Type) -> bool:
return is_subtype(left, right, ignore_promotions=True, always_covariant=True)
def is_overlapping_types_for_overload(left: Type, right: Type) -> bool:
    # Note that among other effects, the 'overlap_for_overloads' flag will effectively
    # ignore possible overlap between type variables and None. This is technically
    # unsafe, but the unsafety is tiny and this prevents some common use cases like:
# @overload
# def foo(x: None) -> None: ..
# @overload
# def foo(x: T) -> Foo[T]: ...
return is_overlapping_types(
left,
right,
ignore_promotions=True,
prohibit_none_typevar_overlap=True,
overlap_for_overloads=True,
)
def is_private(node_name: str) -> bool:
"""Check if node is private to class definition."""
return node_name.startswith("__") and not node_name.endswith("__")
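# Illustrative sketch (not part of the original mypy source): name-mangled attributes count
# as private; dunder names do not.
def _is_private_sketch() -> bool:
    # Expected result: True.
    return is_private("__secret") and not is_private("__init__")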
def is_string_literal(typ: Type) -> bool:
strs = try_getting_str_literals_from_type(typ)
return strs is not None and len(strs) == 1
def has_bool_item(typ: ProperType) -> bool:
"""Return True if type is 'bool' or a union with a 'bool' item."""
if is_named_instance(typ, "builtins.bool"):
return True
if isinstance(typ, UnionType):
return any(is_named_instance(item, "builtins.bool") for item in typ.items)
return False
def collapse_walrus(e: Expression) -> Expression:
"""If an expression is an AssignmentExpr, pull out the assignment target.
We don't make any attempt to pull out all the targets in code like `x := (y := z)`.
We could support narrowing those if that sort of code turns out to be common.
"""
if isinstance(e, AssignmentExpr):
return e.target
return e
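# Illustrative sketch (not part of the original mypy source; the constructed nodes are purely
# hypothetical): for a walrus expression such as "(x := value)", the narrowing target is the
# assignment target, not the whole expression.
def _collapse_walrus_sketch() -> Expression:
    target = NameExpr("x")
    walrus = AssignmentExpr(target, NameExpr("value"))
    # Expected result: the NameExpr for "x".
    return collapse_walrus(walrus)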
def find_last_var_assignment_line(n: Node, v: Var) -> int:
"""Find the highest line number of a potential assignment to variable within node.
This supports local and global variables.
Return -1 if no assignment was found.
"""
visitor = VarAssignVisitor(v)
n.accept(visitor)
return visitor.last_line
class VarAssignVisitor(TraverserVisitor):
def __init__(self, v: Var) -> None:
self.last_line = -1
self.lvalue = False
self.var_node = v
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
self.lvalue = True
for lv in s.lvalues:
lv.accept(self)
self.lvalue = False
def visit_name_expr(self, e: NameExpr) -> None:
if self.lvalue and e.node is self.var_node:
self.last_line = max(self.last_line, e.line)
def visit_member_expr(self, e: MemberExpr) -> None:
old_lvalue = self.lvalue
self.lvalue = False
super().visit_member_expr(e)
self.lvalue = old_lvalue
def visit_index_expr(self, e: IndexExpr) -> None:
old_lvalue = self.lvalue
self.lvalue = False
super().visit_index_expr(e)
self.lvalue = old_lvalue
def visit_with_stmt(self, s: WithStmt) -> None:
self.lvalue = True
for lv in s.target:
if lv is not None:
lv.accept(self)
self.lvalue = False
s.body.accept(self)
def visit_for_stmt(self, s: ForStmt) -> None:
self.lvalue = True
s.index.accept(self)
self.lvalue = False
s.body.accept(self)
if s.else_body:
s.else_body.accept(self)
def visit_assignment_expr(self, e: AssignmentExpr) -> None:
self.lvalue = True
e.target.accept(self)
self.lvalue = False
e.value.accept(self)
def visit_as_pattern(self, p: AsPattern) -> None:
if p.pattern is not None:
p.pattern.accept(self)
if p.name is not None:
self.lvalue = True
p.name.accept(self)
self.lvalue = False
def visit_starred_pattern(self, p: StarredPattern) -> None:
if p.capture is not None:
self.lvalue = True
p.capture.accept(self)
self.lvalue = False
def is_ambiguous_mix_of_enums(types: list[Type]) -> bool:
"""Do types have IntEnum/StrEnum types that are potentially overlapping with other types?
If True, we shouldn't attempt type narrowing based on enum values, as it gets
too ambiguous.
For example, return True if there's an 'int' type together with an IntEnum literal.
However, IntEnum together with a literal of the same IntEnum type is not ambiguous.
"""
# We need these things for this to be ambiguous:
# (1) an IntEnum or StrEnum type
# (2) either a different IntEnum/StrEnum type or a non-enum type ("<other>")
#
# It would be slightly more correct to calculate this separately for IntEnum and
# StrEnum related types, as an IntEnum can't be confused with a StrEnum.
return len(_ambiguous_enum_variants(types)) > 1
def _ambiguous_enum_variants(types: list[Type]) -> set[str]:
result = set()
for t in types:
t = get_proper_type(t)
if isinstance(t, UnionType):
result.update(_ambiguous_enum_variants(t.items))
elif isinstance(t, Instance):
if t.last_known_value:
result.update(_ambiguous_enum_variants([t.last_known_value]))
elif t.type.is_enum and any(
base.fullname in ("enum.IntEnum", "enum.StrEnum") for base in t.type.mro
):
result.add(t.type.fullname)
elif not t.type.is_enum:
# These might compare equal to IntEnum/StrEnum types (e.g. Decimal), so
# let's be conservative
result.add("<other>")
elif isinstance(t, LiteralType):
result.update(_ambiguous_enum_variants([t.fallback]))
elif isinstance(t, NoneType):
pass
else:
result.add("<other>")
return result
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/checker.py
|
Python
|
NOASSERTION
| 389,651 |
"""Expression type checker. This file is conceptually part of TypeChecker."""
from __future__ import annotations
import enum
import itertools
import time
from collections import defaultdict
from contextlib import contextmanager
from typing import Callable, ClassVar, Final, Iterable, Iterator, List, Optional, Sequence, cast
from typing_extensions import TypeAlias as _TypeAlias, assert_never, overload
import mypy.checker
import mypy.errorcodes as codes
from mypy import applytype, erasetype, join, message_registry, nodes, operators, types
from mypy.argmap import ArgTypeExpander, map_actuals_to_formals, map_formals_to_actuals
from mypy.checkmember import analyze_member_access, freeze_all_type_vars, type_object_type
from mypy.checkstrformat import StringFormatterChecker
from mypy.erasetype import erase_type, remove_instance_last_known_values, replace_meta_vars
from mypy.errors import ErrorWatcher, report_internal_error
from mypy.expandtype import (
expand_type,
expand_type_by_instance,
freshen_all_functions_type_vars,
freshen_function_type_vars,
)
from mypy.infer import ArgumentInferContext, infer_function_type_arguments, infer_type_arguments
from mypy.literals import literal
from mypy.maptype import map_instance_to_supertype
from mypy.meet import is_overlapping_types, narrow_declared_type
from mypy.message_registry import ErrorMessage
from mypy.messages import MessageBuilder, format_type
from mypy.nodes import (
ARG_NAMED,
ARG_POS,
ARG_STAR,
ARG_STAR2,
IMPLICITLY_ABSTRACT,
LAMBDA_NAME,
LITERAL_TYPE,
REVEAL_LOCALS,
REVEAL_TYPE,
ArgKind,
AssertTypeExpr,
AssignmentExpr,
AwaitExpr,
BytesExpr,
CallExpr,
CastExpr,
ComparisonExpr,
ComplexExpr,
ConditionalExpr,
Context,
Decorator,
DictExpr,
DictionaryComprehension,
EllipsisExpr,
EnumCallExpr,
Expression,
FloatExpr,
FuncDef,
GeneratorExpr,
IndexExpr,
IntExpr,
LambdaExpr,
ListComprehension,
ListExpr,
MemberExpr,
MypyFile,
NamedTupleExpr,
NameExpr,
NewTypeExpr,
OpExpr,
OverloadedFuncDef,
ParamSpecExpr,
PlaceholderNode,
PromoteExpr,
RefExpr,
RevealExpr,
SetComprehension,
SetExpr,
SliceExpr,
StarExpr,
StrExpr,
SuperExpr,
SymbolNode,
TempNode,
TupleExpr,
TypeAlias,
TypeAliasExpr,
TypeApplication,
TypedDictExpr,
TypeInfo,
TypeVarExpr,
TypeVarTupleExpr,
UnaryExpr,
Var,
YieldExpr,
YieldFromExpr,
)
from mypy.options import PRECISE_TUPLE_TYPES
from mypy.plugin import (
FunctionContext,
FunctionSigContext,
MethodContext,
MethodSigContext,
Plugin,
)
from mypy.semanal_enum import ENUM_BASES
from mypy.state import state
from mypy.subtypes import (
find_member,
is_equivalent,
is_same_type,
is_subtype,
non_method_protocol_members,
)
from mypy.traverser import has_await_expression
from mypy.typeanal import (
check_for_explicit_any,
fix_instance,
has_any_from_unimported_type,
instantiate_type_alias,
make_optional_type,
set_any_tvars,
validate_instance,
)
from mypy.typeops import (
callable_type,
custom_special_method,
erase_to_union_or_bound,
false_only,
fixup_partial_type,
function_type,
get_all_type_vars,
get_type_vars,
is_literal_type_like,
make_simplified_union,
simple_literal_type,
true_only,
try_expanding_sum_type_to_union,
try_getting_str_literals,
tuple_fallback,
)
from mypy.types import (
LITERAL_TYPE_NAMES,
TUPLE_LIKE_INSTANCE_NAMES,
AnyType,
CallableType,
DeletedType,
ErasedType,
ExtraAttrs,
FunctionLike,
Instance,
LiteralType,
LiteralValue,
NoneType,
Overloaded,
Parameters,
ParamSpecFlavor,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeType,
TypeVarId,
TypeVarTupleType,
TypeVarType,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
find_unpack_in_list,
flatten_nested_tuples,
flatten_nested_unions,
get_proper_type,
get_proper_types,
has_recursive_types,
is_named_instance,
split_with_prefix_and_suffix,
)
from mypy.types_utils import (
is_generic_instance,
is_overlapping_none,
is_self_type_like,
remove_optional,
)
from mypy.typestate import type_state
from mypy.typevars import fill_typevars
from mypy.util import split_module_names
from mypy.visitor import ExpressionVisitor
# Type of callback used for checking individual function arguments. See
# check_args() below for details.
ArgChecker: _TypeAlias = Callable[
[Type, Type, ArgKind, Type, int, int, CallableType, Optional[Type], Context, Context], None
]
# Maximum nesting level for union math in overloads; setting this to large values
# may cause performance issues. The reason is that although the union math algorithm we use
# nicely captures most corner cases, its worst case complexity is exponential,
# see https://github.com/python/mypy/pull/5255#discussion_r196896335 for discussion.
MAX_UNIONS: Final = 5
# Types considered safe for comparisons with --strict-equality due to known behaviour of __eq__.
# NOTE: All these types are subtypes of AbstractSet.
OVERLAPPING_TYPES_ALLOWLIST: Final = [
"builtins.set",
"builtins.frozenset",
"typing.KeysView",
"typing.ItemsView",
"builtins._dict_keys",
"builtins._dict_items",
"_collections_abc.dict_keys",
"_collections_abc.dict_items",
]
OVERLAPPING_BYTES_ALLOWLIST: Final = {
"builtins.bytes",
"builtins.bytearray",
"builtins.memoryview",
}
class TooManyUnions(Exception):
"""Indicates that we need to stop splitting unions in an attempt
to match an overload in order to save performance.
"""
def allow_fast_container_literal(t: Type) -> bool:
if isinstance(t, TypeAliasType) and t.is_recursive:
return False
t = get_proper_type(t)
return isinstance(t, Instance) or (
isinstance(t, TupleType) and all(allow_fast_container_literal(it) for it in t.items)
)
def extract_refexpr_names(expr: RefExpr) -> set[str]:
"""Recursively extracts all module references from a reference expression.
Note that currently, the only two subclasses of RefExpr are NameExpr and
MemberExpr."""
output: set[str] = set()
while isinstance(expr.node, MypyFile) or expr.fullname:
if isinstance(expr.node, MypyFile) and expr.fullname:
# If it's None, something's wrong (perhaps due to an
# import cycle or a suppressed error). For now we just
# skip it.
output.add(expr.fullname)
if isinstance(expr, NameExpr):
is_suppressed_import = isinstance(expr.node, Var) and expr.node.is_suppressed_import
if isinstance(expr.node, TypeInfo):
# Reference to a class or a nested class
output.update(split_module_names(expr.node.module_name))
elif "." in expr.fullname and not is_suppressed_import:
# Everything else (that is not a silenced import within a class)
output.add(expr.fullname.rsplit(".", 1)[0])
break
elif isinstance(expr, MemberExpr):
if isinstance(expr.expr, RefExpr):
expr = expr.expr
else:
break
else:
raise AssertionError(f"Unknown RefExpr subclass: {type(expr)}")
return output
class Finished(Exception):
"""Raised if we can terminate overload argument check early (no match)."""
@enum.unique
class UseReverse(enum.Enum):
"""Used in `visit_op_expr` to enable or disable reverse method checks."""
DEFAULT = 0
ALWAYS = 1
NEVER = 2
USE_REVERSE_DEFAULT: Final = UseReverse.DEFAULT
USE_REVERSE_ALWAYS: Final = UseReverse.ALWAYS
USE_REVERSE_NEVER: Final = UseReverse.NEVER
class ExpressionChecker(ExpressionVisitor[Type]):
"""Expression type checker.
This class works closely together with checker.TypeChecker.
"""
# Some services are provided by a TypeChecker instance.
chk: mypy.checker.TypeChecker
# This is shared with TypeChecker, but stored also here for convenience.
msg: MessageBuilder
# Type context for type inference
type_context: list[Type | None]
# cache resolved types in some cases
resolved_type: dict[Expression, ProperType]
strfrm_checker: StringFormatterChecker
plugin: Plugin
def __init__(
self,
chk: mypy.checker.TypeChecker,
msg: MessageBuilder,
plugin: Plugin,
per_line_checking_time_ns: dict[int, int],
) -> None:
"""Construct an expression type checker."""
self.chk = chk
self.msg = msg
self.plugin = plugin
self.per_line_checking_time_ns = per_line_checking_time_ns
self.collect_line_checking_stats = chk.options.line_checking_stats is not None
# Are we already visiting some expression? This is used to avoid double counting
# time for nested expressions.
self.in_expression = False
self.type_context = [None]
# Temporary overrides for expression types. This is currently
# used by the union math in overloads.
# TODO: refactor this to use a pattern similar to one in
# multiassign_from_union, or maybe even combine the two?
self.type_overrides: dict[Expression, Type] = {}
self.strfrm_checker = StringFormatterChecker(self, self.chk, self.msg)
self.resolved_type = {}
# Callee in a call expression is in some sense both runtime context and
# type context, because we support things like C[int](...). Store information
# on whether current expression is a callee, to give better error messages
# related to type context.
self.is_callee = False
type_state.infer_polymorphic = not self.chk.options.old_type_inference
def reset(self) -> None:
self.resolved_type = {}
def visit_name_expr(self, e: NameExpr) -> Type:
"""Type check a name expression.
It can be of any kind: local, member or global.
"""
self.chk.module_refs.update(extract_refexpr_names(e))
result = self.analyze_ref_expr(e)
return self.narrow_type_from_binder(e, result)
def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type:
result: Type | None = None
node = e.node
if isinstance(e, NameExpr) and e.is_special_form:
# A special form definition, nothing to check here.
return AnyType(TypeOfAny.special_form)
if isinstance(node, Var):
# Variable reference.
result = self.analyze_var_ref(node, e)
if isinstance(result, PartialType):
result = self.chk.handle_partial_var_type(result, lvalue, node, e)
elif isinstance(node, FuncDef):
# Reference to a global function.
result = function_type(node, self.named_type("builtins.function"))
elif isinstance(node, OverloadedFuncDef):
if node.type is None:
if self.chk.in_checked_function() and node.items:
self.chk.handle_cannot_determine_type(node.name, e)
result = AnyType(TypeOfAny.from_error)
else:
result = node.type
elif isinstance(node, TypeInfo):
# Reference to a type object.
if node.typeddict_type:
# We special-case TypedDict, because they don't define any constructor.
result = self.typeddict_callable(node)
elif node.fullname == "types.NoneType":
# We special case NoneType, because its stub definition is not related to None.
result = TypeType(NoneType())
else:
result = type_object_type(node, self.named_type)
if isinstance(result, CallableType) and isinstance( # type: ignore[misc]
result.ret_type, Instance
):
# We need to set correct line and column
# TODO: always do this in type_object_type by passing the original context
result.ret_type.line = e.line
result.ret_type.column = e.column
if is_type_type_context(self.type_context[-1]):
# This is the type in a type[] expression, so substitute type
# variables with Any.
result = erasetype.erase_typevars(result)
elif isinstance(node, MypyFile):
# Reference to a module object.
result = self.module_type(node)
elif isinstance(node, Decorator):
result = self.analyze_var_ref(node.var, e)
elif isinstance(node, TypeAlias):
# Something that refers to a type alias appears in runtime context.
# Note that we suppress bogus errors for alias redefinitions,
# they are already reported in semanal.py.
result = self.alias_type_in_runtime_context(
node, ctx=e, alias_definition=e.is_alias_rvalue or lvalue
)
elif isinstance(node, TypeVarExpr):
return self.named_type("typing.TypeVar")
elif isinstance(node, (ParamSpecExpr, TypeVarTupleExpr)):
result = self.object_type()
else:
if isinstance(node, PlaceholderNode):
assert False, f"PlaceholderNode {node.fullname!r} leaked to checker"
# Unknown reference; use any type implicitly to avoid
# generating extra type errors.
result = AnyType(TypeOfAny.from_error)
assert result is not None
return result
def analyze_var_ref(self, var: Var, context: Context) -> Type:
if var.type:
var_type = get_proper_type(var.type)
if isinstance(var_type, Instance):
if var.fullname == "typing.Any":
# The typeshed type is 'object'; give a more useful type in runtime context
return self.named_type("typing._SpecialForm")
if self.is_literal_context() and var_type.last_known_value is not None:
return var_type.last_known_value
if var.name in {"True", "False"}:
return self.infer_literal_expr_type(var.name == "True", "builtins.bool")
return var.type
else:
if not var.is_ready and self.chk.in_checked_function():
self.chk.handle_cannot_determine_type(var.name, context)
# Implicit 'Any' type.
return AnyType(TypeOfAny.special_form)
def module_type(self, node: MypyFile) -> Instance:
try:
result = self.named_type("types.ModuleType")
except KeyError:
            # In test cases 'types' may not be available.
# Fall back to a dummy 'object' type instead to
# avoid a crash.
result = self.named_type("builtins.object")
module_attrs = {}
immutable = set()
for name, n in node.names.items():
if not n.module_public:
continue
if isinstance(n.node, Var) and n.node.is_final:
immutable.add(name)
typ = self.chk.determine_type_of_member(n)
if typ:
module_attrs[name] = typ
else:
# TODO: what to do about nested module references?
# They are non-trivial because there may be import cycles.
module_attrs[name] = AnyType(TypeOfAny.special_form)
result.extra_attrs = ExtraAttrs(module_attrs, immutable, node.fullname)
return result
def visit_call_expr(self, e: CallExpr, allow_none_return: bool = False) -> Type:
"""Type check a call expression."""
if e.analyzed:
if isinstance(e.analyzed, NamedTupleExpr) and not e.analyzed.is_typed:
# Type check the arguments, but ignore the results. This relies
# on the typeshed stubs to type check the arguments.
self.visit_call_expr_inner(e)
# It's really a special form that only looks like a call.
return self.accept(e.analyzed, self.type_context[-1])
return self.visit_call_expr_inner(e, allow_none_return=allow_none_return)
def refers_to_typeddict(self, base: Expression) -> bool:
if not isinstance(base, RefExpr):
return False
if isinstance(base.node, TypeInfo) and base.node.typeddict_type is not None:
# Direct reference.
return True
return isinstance(base.node, TypeAlias) and isinstance(
get_proper_type(base.node.target), TypedDictType
)
def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) -> Type:
if (
self.refers_to_typeddict(e.callee)
or isinstance(e.callee, IndexExpr)
and self.refers_to_typeddict(e.callee.base)
):
typeddict_callable = get_proper_type(self.accept(e.callee, is_callee=True))
if isinstance(typeddict_callable, CallableType):
typeddict_type = get_proper_type(typeddict_callable.ret_type)
assert isinstance(typeddict_type, TypedDictType)
return self.check_typeddict_call(
typeddict_type, e.arg_kinds, e.arg_names, e.args, e, typeddict_callable
)
if (
isinstance(e.callee, NameExpr)
and e.callee.name in ("isinstance", "issubclass")
and len(e.args) == 2
):
for typ in mypy.checker.flatten(e.args[1]):
node = None
if isinstance(typ, NameExpr):
try:
node = self.chk.lookup_qualified(typ.name)
except KeyError:
# Undefined names should already be reported in semantic analysis.
pass
if is_expr_literal_type(typ):
self.msg.cannot_use_function_with_type(e.callee.name, "Literal", e)
continue
if node and isinstance(node.node, TypeAlias):
target = get_proper_type(node.node.target)
if isinstance(target, AnyType):
self.msg.cannot_use_function_with_type(e.callee.name, "Any", e)
continue
if isinstance(target, NoneType):
continue
if (
isinstance(typ, IndexExpr)
and isinstance(typ.analyzed, (TypeApplication, TypeAliasExpr))
) or (
isinstance(typ, NameExpr)
and node
and isinstance(node.node, TypeAlias)
and not node.node.no_args
and not (
isinstance(union_target := get_proper_type(node.node.target), UnionType)
and union_target.uses_pep604_syntax
)
):
self.msg.type_arguments_not_allowed(e)
if isinstance(typ, RefExpr) and isinstance(typ.node, TypeInfo):
if typ.node.typeddict_type:
self.msg.cannot_use_function_with_type(e.callee.name, "TypedDict", e)
elif typ.node.is_newtype:
self.msg.cannot_use_function_with_type(e.callee.name, "NewType", e)
self.try_infer_partial_type(e)
type_context = None
if isinstance(e.callee, LambdaExpr):
formal_to_actual = map_actuals_to_formals(
e.arg_kinds,
e.arg_names,
e.callee.arg_kinds,
e.callee.arg_names,
lambda i: self.accept(e.args[i]),
)
arg_types = [
join.join_type_list([self.accept(e.args[j]) for j in formal_to_actual[i]])
for i in range(len(e.callee.arg_kinds))
]
type_context = CallableType(
arg_types,
e.callee.arg_kinds,
e.callee.arg_names,
ret_type=self.object_type(),
fallback=self.named_type("builtins.function"),
)
callee_type = get_proper_type(
self.accept(e.callee, type_context, always_allow_any=True, is_callee=True)
)
# Figure out the full name of the callee for plugin lookup.
object_type = None
member = None
fullname = None
if isinstance(e.callee, RefExpr):
# There are two special cases where plugins might act:
# * A "static" reference/alias to a class or function;
# get_function_hook() will be invoked for these.
fullname = e.callee.fullname or None
if isinstance(e.callee.node, TypeAlias):
target = get_proper_type(e.callee.node.target)
if isinstance(target, Instance):
fullname = target.type.fullname
# * Call to a method on object that has a full name (see
# method_fullname() for details on supported objects);
# get_method_hook() and get_method_signature_hook() will
# be invoked for these.
if (
not fullname
and isinstance(e.callee, MemberExpr)
and self.chk.has_type(e.callee.expr)
):
member = e.callee.name
object_type = self.chk.lookup_type(e.callee.expr)
if (
self.chk.options.disallow_untyped_calls
and self.chk.in_checked_function()
and isinstance(callee_type, CallableType)
and callee_type.implicit
and callee_type.name != LAMBDA_NAME
):
if fullname is None and member is not None:
assert object_type is not None
fullname = self.method_fullname(object_type, member)
if not fullname or not any(
fullname == p or fullname.startswith(f"{p}.")
for p in self.chk.options.untyped_calls_exclude
):
self.msg.untyped_function_call(callee_type, e)
ret_type = self.check_call_expr_with_callee_type(
callee_type, e, fullname, object_type, member
)
if isinstance(e.callee, RefExpr) and len(e.args) == 2:
if e.callee.fullname in ("builtins.isinstance", "builtins.issubclass"):
self.check_runtime_protocol_test(e)
if e.callee.fullname == "builtins.issubclass":
self.check_protocol_issubclass(e)
if isinstance(e.callee, MemberExpr) and e.callee.name == "format":
self.check_str_format_call(e)
ret_type = get_proper_type(ret_type)
if isinstance(ret_type, UnionType):
ret_type = make_simplified_union(ret_type.items)
if isinstance(ret_type, UninhabitedType) and not ret_type.ambiguous:
self.chk.binder.unreachable()
# Warn on calls to functions that always return None. The check
        # of ret_type is both a common-case optimization and a way to avoid reporting
# the error in dynamic functions (where it will be Any).
if (
not allow_none_return
and isinstance(ret_type, NoneType)
and self.always_returns_none(e.callee)
):
self.chk.msg.does_not_return_value(callee_type, e)
return AnyType(TypeOfAny.from_error)
return ret_type
def check_str_format_call(self, e: CallExpr) -> None:
"""More precise type checking for str.format() calls on literals."""
assert isinstance(e.callee, MemberExpr)
format_value = None
if isinstance(e.callee.expr, StrExpr):
format_value = e.callee.expr.value
elif self.chk.has_type(e.callee.expr):
typ = get_proper_type(self.chk.lookup_type(e.callee.expr))
if (
isinstance(typ, Instance)
and typ.type.is_enum
and isinstance(typ.last_known_value, LiteralType)
and isinstance(typ.last_known_value.value, str)
):
value_type = typ.type.names[typ.last_known_value.value].type
if isinstance(value_type, Type):
typ = get_proper_type(value_type)
base_typ = try_getting_literal(typ)
if isinstance(base_typ, LiteralType) and isinstance(base_typ.value, str):
format_value = base_typ.value
if format_value is not None:
self.strfrm_checker.check_str_format_call(e, format_value)
def method_fullname(self, object_type: Type, method_name: str) -> str | None:
"""Convert a method name to a fully qualified name, based on the type of the object that
it is invoked on. Return `None` if the name of `object_type` cannot be determined.
"""
object_type = get_proper_type(object_type)
if isinstance(object_type, CallableType) and object_type.is_type_obj():
# For class method calls, object_type is a callable representing the class object.
# We "unwrap" it to a regular type, as the class/instance method difference doesn't
# affect the fully qualified name.
object_type = get_proper_type(object_type.ret_type)
elif isinstance(object_type, TypeType):
object_type = object_type.item
type_name = None
if isinstance(object_type, Instance):
type_name = object_type.type.fullname
elif isinstance(object_type, (TypedDictType, LiteralType)):
info = object_type.fallback.type.get_containing_type_info(method_name)
type_name = info.fullname if info is not None else None
elif isinstance(object_type, TupleType):
type_name = tuple_fallback(object_type).type.fullname
if type_name:
return f"{type_name}.{method_name}"
else:
return None
def always_returns_none(self, node: Expression) -> bool:
"""Check if `node` refers to something explicitly annotated as only returning None."""
if isinstance(node, RefExpr):
if self.defn_returns_none(node.node):
return True
if isinstance(node, MemberExpr) and node.node is None: # instance or class attribute
typ = get_proper_type(self.chk.lookup_type(node.expr))
if isinstance(typ, Instance):
info = typ.type
elif isinstance(typ, CallableType) and typ.is_type_obj():
ret_type = get_proper_type(typ.ret_type)
if isinstance(ret_type, Instance):
info = ret_type.type
else:
return False
else:
return False
sym = info.get(node.name)
if sym and self.defn_returns_none(sym.node):
return True
return False
def defn_returns_none(self, defn: SymbolNode | None) -> bool:
"""Check if `defn` can _only_ return None."""
if isinstance(defn, FuncDef):
return isinstance(defn.type, CallableType) and isinstance(
get_proper_type(defn.type.ret_type), NoneType
)
if isinstance(defn, OverloadedFuncDef):
return all(self.defn_returns_none(item) for item in defn.items)
if isinstance(defn, Var):
typ = get_proper_type(defn.type)
if (
not defn.is_inferred
and isinstance(typ, CallableType)
and isinstance(get_proper_type(typ.ret_type), NoneType)
):
return True
if isinstance(typ, Instance):
sym = typ.type.get("__call__")
if sym and self.defn_returns_none(sym.node):
return True
return False
def check_runtime_protocol_test(self, e: CallExpr) -> None:
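        # Flags e.g. ``isinstance(x, SupportsFoo)`` when SupportsFoo (an illustrative
        # name) is a protocol that is not decorated with @runtime_checkable.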
for expr in mypy.checker.flatten(e.args[1]):
tp = get_proper_type(self.chk.lookup_type(expr))
if (
isinstance(tp, FunctionLike)
and tp.is_type_obj()
and tp.type_object().is_protocol
and not tp.type_object().runtime_protocol
):
self.chk.fail(message_registry.RUNTIME_PROTOCOL_EXPECTED, e)
def check_protocol_issubclass(self, e: CallExpr) -> None:
for expr in mypy.checker.flatten(e.args[1]):
tp = get_proper_type(self.chk.lookup_type(expr))
if isinstance(tp, FunctionLike) and tp.is_type_obj() and tp.type_object().is_protocol:
attr_members = non_method_protocol_members(tp.type_object())
if attr_members:
self.chk.msg.report_non_method_protocol(tp.type_object(), attr_members, e)
def check_typeddict_call(
self,
callee: TypedDictType,
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None],
args: list[Expression],
context: Context,
orig_callee: Type | None,
) -> Type:
if args and all(ak in (ARG_NAMED, ARG_STAR2) for ak in arg_kinds):
# ex: Point(x=42, y=1337, **extras)
# This is a bit ugly, but this is a price for supporting all possible syntax
# variants for TypedDict constructors.
kwargs = zip([StrExpr(n) if n is not None else None for n in arg_names], args)
result = self.validate_typeddict_kwargs(kwargs=kwargs, callee=callee)
if result is not None:
validated_kwargs, always_present_keys = result
return self.check_typeddict_call_with_kwargs(
callee, validated_kwargs, context, orig_callee, always_present_keys
)
return AnyType(TypeOfAny.from_error)
if len(args) == 1 and arg_kinds[0] == ARG_POS:
unique_arg = args[0]
if isinstance(unique_arg, DictExpr):
# ex: Point({'x': 42, 'y': 1337, **extras})
return self.check_typeddict_call_with_dict(
callee, unique_arg.items, context, orig_callee
)
if isinstance(unique_arg, CallExpr) and isinstance(unique_arg.analyzed, DictExpr):
# ex: Point(dict(x=42, y=1337, **extras))
return self.check_typeddict_call_with_dict(
callee, unique_arg.analyzed.items, context, orig_callee
)
if not args:
# ex: EmptyDict()
return self.check_typeddict_call_with_kwargs(callee, {}, context, orig_callee, set())
self.chk.fail(message_registry.INVALID_TYPEDDICT_ARGS, context)
return AnyType(TypeOfAny.from_error)
def validate_typeddict_kwargs(
self, kwargs: Iterable[tuple[Expression | None, Expression]], callee: TypedDictType
) -> tuple[dict[str, list[Expression]], set[str]] | None:
# All (actual or mapped from ** unpacks) expressions that can match given key.
result = defaultdict(list)
# Keys that are guaranteed to be present no matter what (e.g. for all items of a union)
always_present_keys = set()
# Indicates latest encountered ** unpack among items.
last_star_found = None
for item_name_expr, item_arg in kwargs:
if item_name_expr:
key_type = self.accept(item_name_expr)
values = try_getting_str_literals(item_name_expr, key_type)
literal_value = None
if values and len(values) == 1:
literal_value = values[0]
if literal_value is None:
key_context = item_name_expr or item_arg
self.chk.fail(
message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
key_context,
code=codes.LITERAL_REQ,
)
return None
else:
# A directly present key unconditionally shadows all previously found
# values from ** items.
# TODO: for duplicate keys, type-check all values.
result[literal_value] = [item_arg]
always_present_keys.add(literal_value)
else:
last_star_found = item_arg
if not self.validate_star_typeddict_item(
item_arg, callee, result, always_present_keys
):
return None
if self.chk.options.extra_checks and last_star_found is not None:
absent_keys = []
for key in callee.items:
if key not in callee.required_keys and key not in result:
absent_keys.append(key)
if absent_keys:
# Having an optional key not explicitly declared by a ** unpacked
# TypedDict is unsafe, it may be an (incompatible) subtype at runtime.
# TODO: catch the cases where a declared key is overridden by a subsequent
                # ** item without it (and not overridden again by a complete ** item).
self.msg.non_required_keys_absent_with_star(absent_keys, last_star_found)
return result, always_present_keys
def validate_star_typeddict_item(
self,
item_arg: Expression,
callee: TypedDictType,
result: dict[str, list[Expression]],
always_present_keys: set[str],
) -> bool:
"""Update keys/expressions from a ** expression in TypedDict constructor.
Note `result` and `always_present_keys` are updated in place. Return true if the
        expression `item_arg` may be valid in the `callee` TypedDict context.
"""
inferred = get_proper_type(self.accept(item_arg, type_context=callee))
possible_tds = []
if isinstance(inferred, TypedDictType):
possible_tds = [inferred]
elif isinstance(inferred, UnionType):
for item in get_proper_types(inferred.relevant_items()):
if isinstance(item, TypedDictType):
possible_tds.append(item)
elif not self.valid_unpack_fallback_item(item):
self.msg.unsupported_target_for_star_typeddict(item, item_arg)
return False
elif not self.valid_unpack_fallback_item(inferred):
self.msg.unsupported_target_for_star_typeddict(inferred, item_arg)
return False
all_keys: set[str] = set()
for td in possible_tds:
all_keys |= td.items.keys()
for key in all_keys:
arg = TempNode(
UnionType.make_union([td.items[key] for td in possible_tds if key in td.items])
)
arg.set_line(item_arg)
if all(key in td.required_keys for td in possible_tds):
always_present_keys.add(key)
# Always present keys override previously found values. This is done
# to support use cases like `Config({**defaults, **overrides})`, where
                    # some `overrides` types are narrower than the corresponding types in
                    # `defaults`, which are too wide for `Config`.
if result[key]:
first = result[key][0]
if not isinstance(first, TempNode):
# We must always preserve any non-synthetic values, so that
# we will accept them even if they are shadowed.
result[key] = [first, arg]
else:
result[key] = [arg]
else:
result[key] = [arg]
else:
                # If this key is not required in at least one item of the union,
                # it may not shadow a previous item, so we need to type check both.
result[key].append(arg)
return True
def valid_unpack_fallback_item(self, typ: ProperType) -> bool:
if isinstance(typ, AnyType):
return True
if not isinstance(typ, Instance) or not typ.type.has_base("typing.Mapping"):
return False
mapped = map_instance_to_supertype(typ, self.chk.lookup_typeinfo("typing.Mapping"))
return all(isinstance(a, AnyType) for a in get_proper_types(mapped.args))
def match_typeddict_call_with_dict(
self,
callee: TypedDictType,
kwargs: list[tuple[Expression | None, Expression]],
context: Context,
) -> bool:
result = self.validate_typeddict_kwargs(kwargs=kwargs, callee=callee)
if result is not None:
validated_kwargs, _ = result
return callee.required_keys <= set(validated_kwargs.keys()) <= set(callee.items.keys())
else:
return False
def check_typeddict_call_with_dict(
self,
callee: TypedDictType,
kwargs: list[tuple[Expression | None, Expression]],
context: Context,
orig_callee: Type | None,
) -> Type:
result = self.validate_typeddict_kwargs(kwargs=kwargs, callee=callee)
if result is not None:
validated_kwargs, always_present_keys = result
return self.check_typeddict_call_with_kwargs(
callee,
kwargs=validated_kwargs,
context=context,
orig_callee=orig_callee,
always_present_keys=always_present_keys,
)
else:
return AnyType(TypeOfAny.from_error)
def typeddict_callable(self, info: TypeInfo) -> CallableType:
"""Construct a reasonable type for a TypedDict type in runtime context.
If it appears as a callee, it will be special-cased anyway, e.g. it is
also allowed to accept a single positional argument if it is a dict literal.
Note it is not safe to move this to type_object_type() since it will crash
        on plugin-generated TypedDicts, which may not have the special_alias.
"""
assert info.special_alias is not None
target = info.special_alias.target
assert isinstance(target, ProperType) and isinstance(target, TypedDictType)
expected_types = list(target.items.values())
kinds = [ArgKind.ARG_NAMED] * len(expected_types)
names = list(target.items.keys())
return CallableType(
expected_types,
kinds,
names,
target,
self.named_type("builtins.type"),
variables=info.defn.type_vars,
)
def typeddict_callable_from_context(self, callee: TypedDictType) -> CallableType:
return CallableType(
list(callee.items.values()),
[
ArgKind.ARG_NAMED if name in callee.required_keys else ArgKind.ARG_NAMED_OPT
for name in callee.items
],
list(callee.items.keys()),
callee,
self.named_type("builtins.type"),
)
def check_typeddict_call_with_kwargs(
self,
callee: TypedDictType,
kwargs: dict[str, list[Expression]],
context: Context,
orig_callee: Type | None,
always_present_keys: set[str],
) -> Type:
actual_keys = kwargs.keys()
if callee.to_be_mutated:
assigned_readonly_keys = actual_keys & callee.readonly_keys
if assigned_readonly_keys:
self.msg.readonly_keys_mutated(assigned_readonly_keys, context=context)
if not (
callee.required_keys <= always_present_keys and actual_keys <= callee.items.keys()
):
if not (actual_keys <= callee.items.keys()):
self.msg.unexpected_typeddict_keys(
callee,
expected_keys=[
key
for key in callee.items.keys()
if key in callee.required_keys or key in actual_keys
],
actual_keys=list(actual_keys),
context=context,
)
if not (callee.required_keys <= always_present_keys):
self.msg.unexpected_typeddict_keys(
callee,
expected_keys=[
key for key in callee.items.keys() if key in callee.required_keys
],
actual_keys=[
key for key in always_present_keys if key in callee.required_keys
],
context=context,
)
if callee.required_keys > actual_keys:
            # actual_keys is a proper subset of required_keys.
# This means we're missing some keys and as such, we can't
# properly type the object
return AnyType(TypeOfAny.from_error)
orig_callee = get_proper_type(orig_callee)
if isinstance(orig_callee, CallableType):
infer_callee = orig_callee
else:
# Try reconstructing from type context.
if callee.fallback.type.special_alias is not None:
infer_callee = self.typeddict_callable(callee.fallback.type)
else:
# Likely a TypedDict type generated by a plugin.
infer_callee = self.typeddict_callable_from_context(callee)
# We don't show any errors, just infer types in a generic TypedDict type,
# a custom error message will be given below, if there are errors.
with self.msg.filter_errors(), self.chk.local_type_map():
orig_ret_type, _ = self.check_callable_call(
infer_callee,
# We use first expression for each key to infer type variables of a generic
# TypedDict. This is a bit arbitrary, but in most cases will work better than
# trying to infer a union or a join.
[args[0] for args in kwargs.values()],
[ArgKind.ARG_NAMED] * len(kwargs),
context,
list(kwargs.keys()),
None,
None,
None,
)
ret_type = get_proper_type(orig_ret_type)
if not isinstance(ret_type, TypedDictType):
# If something went really wrong, type-check call with original type,
# this may give a better error message.
ret_type = callee
for item_name, item_expected_type in ret_type.items.items():
if item_name in kwargs:
item_values = kwargs[item_name]
for item_value in item_values:
self.chk.check_simple_assignment(
lvalue_type=item_expected_type,
rvalue=item_value,
context=item_value,
msg=ErrorMessage(
message_registry.INCOMPATIBLE_TYPES.value, code=codes.TYPEDDICT_ITEM
),
lvalue_name=f'TypedDict item "{item_name}"',
rvalue_name="expression",
)
return orig_ret_type
def get_partial_self_var(self, expr: MemberExpr) -> Var | None:
"""Get variable node for a partial self attribute.
If the expression is not a self attribute, or attribute is not variable,
or variable is not partial, return None.
"""
if not (
isinstance(expr.expr, NameExpr)
and isinstance(expr.expr.node, Var)
and expr.expr.node.is_self
):
# Not a self.attr expression.
return None
info = self.chk.scope.enclosing_class()
if not info or expr.name not in info.names:
# Don't mess with partial types in superclasses.
return None
sym = info.names[expr.name]
if isinstance(sym.node, Var) and isinstance(sym.node.type, PartialType):
return sym.node
return None
# Types and methods that can be used to infer partial types.
item_args: ClassVar[dict[str, list[str]]] = {
"builtins.list": ["append"],
"builtins.set": ["add", "discard"],
}
container_args: ClassVar[dict[str, dict[str, list[str]]]] = {
"builtins.list": {"extend": ["builtins.list"]},
"builtins.dict": {"update": ["builtins.dict"]},
"collections.OrderedDict": {"update": ["builtins.dict"]},
"builtins.set": {"update": ["builtins.set", "builtins.list"]},
}
def try_infer_partial_type(self, e: CallExpr) -> None:
"""Try to make partial type precise from a call."""
if not isinstance(e.callee, MemberExpr):
return
callee = e.callee
if isinstance(callee.expr, RefExpr):
# Call a method with a RefExpr callee, such as 'x.method(...)'.
ret = self.get_partial_var(callee.expr)
if ret is None:
return
var, partial_types = ret
typ = self.try_infer_partial_value_type_from_call(e, callee.name, var)
# Var may be deleted from partial_types in try_infer_partial_value_type_from_call
if typ is not None and var in partial_types:
var.type = typ
del partial_types[var]
elif isinstance(callee.expr, IndexExpr) and isinstance(callee.expr.base, RefExpr):
# Call 'x[y].method(...)'; may infer type of 'x' if it's a partial defaultdict.
if callee.expr.analyzed is not None:
return # A special form
base = callee.expr.base
index = callee.expr.index
ret = self.get_partial_var(base)
if ret is None:
return
var, partial_types = ret
partial_type = get_partial_instance_type(var.type)
if partial_type is None or partial_type.value_type is None:
return
value_type = self.try_infer_partial_value_type_from_call(e, callee.name, var)
if value_type is not None:
# Infer key type.
key_type = self.accept(index)
if mypy.checker.is_valid_inferred_type(key_type):
# Store inferred partial type.
assert partial_type.type is not None
typename = partial_type.type.fullname
var.type = self.chk.named_generic_type(typename, [key_type, value_type])
del partial_types[var]
def get_partial_var(self, ref: RefExpr) -> tuple[Var, dict[Var, Context]] | None:
var = ref.node
if var is None and isinstance(ref, MemberExpr):
var = self.get_partial_self_var(ref)
if not isinstance(var, Var):
return None
partial_types = self.chk.find_partial_types(var)
if partial_types is None:
return None
return var, partial_types
def try_infer_partial_value_type_from_call(
self, e: CallExpr, methodname: str, var: Var
) -> Instance | None:
"""Try to make partial type precise from a call such as 'x.append(y)'."""
if self.chk.current_node_deferred:
return None
partial_type = get_partial_instance_type(var.type)
if partial_type is None:
return None
if partial_type.value_type:
typename = partial_type.value_type.type.fullname
else:
assert partial_type.type is not None
typename = partial_type.type.fullname
# Sometimes we can infer a full type for a partial List, Dict or Set type.
# TODO: Don't infer argument expression twice.
if (
typename in self.item_args
and methodname in self.item_args[typename]
and e.arg_kinds == [ARG_POS]
):
item_type = self.accept(e.args[0])
if mypy.checker.is_valid_inferred_type(item_type):
return self.chk.named_generic_type(typename, [item_type])
elif (
typename in self.container_args
and methodname in self.container_args[typename]
and e.arg_kinds == [ARG_POS]
):
arg_type = get_proper_type(self.accept(e.args[0]))
if isinstance(arg_type, Instance):
arg_typename = arg_type.type.fullname
if arg_typename in self.container_args[typename][methodname]:
if all(
mypy.checker.is_valid_inferred_type(item_type)
for item_type in arg_type.args
):
return self.chk.named_generic_type(typename, list(arg_type.args))
elif isinstance(arg_type, AnyType):
return self.chk.named_type(typename)
return None
def apply_function_plugin(
self,
callee: CallableType,
arg_kinds: list[ArgKind],
arg_types: list[Type],
arg_names: Sequence[str | None] | None,
formal_to_actual: list[list[int]],
args: list[Expression],
fullname: str,
object_type: Type | None,
context: Context,
) -> Type:
"""Use special case logic to infer the return type of a specific named function/method.
Caller must ensure that a plugin hook exists. There are two different cases:
- If object_type is None, the caller must ensure that a function hook exists
for fullname.
- If object_type is not None, the caller must ensure that a method hook exists
for fullname.
Return the inferred return type.
"""
num_formals = len(callee.arg_types)
formal_arg_types: list[list[Type]] = [[] for _ in range(num_formals)]
formal_arg_exprs: list[list[Expression]] = [[] for _ in range(num_formals)]
formal_arg_names: list[list[str | None]] = [[] for _ in range(num_formals)]
formal_arg_kinds: list[list[ArgKind]] = [[] for _ in range(num_formals)]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
formal_arg_types[formal].append(arg_types[actual])
formal_arg_exprs[formal].append(args[actual])
if arg_names:
formal_arg_names[formal].append(arg_names[actual])
else:
formal_arg_names[formal].append(None)
formal_arg_kinds[formal].append(arg_kinds[actual])
if object_type is None:
# Apply function plugin
callback = self.plugin.get_function_hook(fullname)
assert callback is not None # Assume that caller ensures this
return callback(
FunctionContext(
arg_types=formal_arg_types,
arg_kinds=formal_arg_kinds,
callee_arg_names=callee.arg_names,
arg_names=formal_arg_names,
default_return_type=callee.ret_type,
args=formal_arg_exprs,
context=context,
api=self.chk,
)
)
else:
# Apply method plugin
method_callback = self.plugin.get_method_hook(fullname)
assert method_callback is not None # Assume that caller ensures this
object_type = get_proper_type(object_type)
return method_callback(
MethodContext(
type=object_type,
arg_types=formal_arg_types,
arg_kinds=formal_arg_kinds,
callee_arg_names=callee.arg_names,
arg_names=formal_arg_names,
default_return_type=callee.ret_type,
args=formal_arg_exprs,
context=context,
api=self.chk,
)
)
def apply_signature_hook(
self,
callee: FunctionLike,
args: list[Expression],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
hook: Callable[[list[list[Expression]], CallableType], FunctionLike],
) -> FunctionLike:
"""Helper to apply a signature hook for either a function or method"""
if isinstance(callee, CallableType):
num_formals = len(callee.arg_kinds)
formal_to_actual = map_actuals_to_formals(
arg_kinds,
arg_names,
callee.arg_kinds,
callee.arg_names,
lambda i: self.accept(args[i]),
)
formal_arg_exprs: list[list[Expression]] = [[] for _ in range(num_formals)]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
formal_arg_exprs[formal].append(args[actual])
return hook(formal_arg_exprs, callee)
else:
assert isinstance(callee, Overloaded)
items = []
for item in callee.items:
adjusted = self.apply_signature_hook(item, args, arg_kinds, arg_names, hook)
assert isinstance(adjusted, CallableType)
items.append(adjusted)
return Overloaded(items)
def apply_function_signature_hook(
self,
callee: FunctionLike,
args: list[Expression],
arg_kinds: list[ArgKind],
context: Context,
arg_names: Sequence[str | None] | None,
signature_hook: Callable[[FunctionSigContext], FunctionLike],
) -> FunctionLike:
"""Apply a plugin hook that may infer a more precise signature for a function."""
return self.apply_signature_hook(
callee,
args,
arg_kinds,
arg_names,
(lambda args, sig: signature_hook(FunctionSigContext(args, sig, context, self.chk))),
)
def apply_method_signature_hook(
self,
callee: FunctionLike,
args: list[Expression],
arg_kinds: list[ArgKind],
context: Context,
arg_names: Sequence[str | None] | None,
object_type: Type,
signature_hook: Callable[[MethodSigContext], FunctionLike],
) -> FunctionLike:
"""Apply a plugin hook that may infer a more precise signature for a method."""
pobject_type = get_proper_type(object_type)
return self.apply_signature_hook(
callee,
args,
arg_kinds,
arg_names,
(
lambda args, sig: signature_hook(
MethodSigContext(pobject_type, args, sig, context, self.chk)
)
),
)
def transform_callee_type(
self,
callable_name: str | None,
callee: Type,
args: list[Expression],
arg_kinds: list[ArgKind],
context: Context,
arg_names: Sequence[str | None] | None = None,
object_type: Type | None = None,
) -> Type:
"""Attempt to determine a more accurate signature for a method call.
This is done by looking up and applying a method signature hook (if one exists for the
given method name).
If no matching method signature hook is found, callee is returned unmodified. The same
happens if the arguments refer to a non-method callable (this is allowed so that the code
calling transform_callee_type needs to perform fewer boilerplate checks).
Note: this method is *not* called automatically as part of check_call, because in some
cases check_call is called multiple times while checking a single call (for example when
dealing with overloads). Instead, this method needs to be called explicitly
(if appropriate) before the signature is passed to check_call.
"""
callee = get_proper_type(callee)
if callable_name is not None and isinstance(callee, FunctionLike):
if object_type is not None:
method_sig_hook = self.plugin.get_method_signature_hook(callable_name)
if method_sig_hook:
return self.apply_method_signature_hook(
callee, args, arg_kinds, context, arg_names, object_type, method_sig_hook
)
else:
function_sig_hook = self.plugin.get_function_signature_hook(callable_name)
if function_sig_hook:
return self.apply_function_signature_hook(
callee, args, arg_kinds, context, arg_names, function_sig_hook
)
return callee
def is_generic_decorator_overload_call(
self, callee_type: CallableType, args: list[Expression]
) -> Overloaded | None:
"""Check if this looks like an application of a generic function to overload argument."""
assert callee_type.variables
if len(callee_type.arg_types) != 1 or len(args) != 1:
# TODO: can we handle more general cases?
return None
if not isinstance(get_proper_type(callee_type.arg_types[0]), CallableType):
return None
if not isinstance(get_proper_type(callee_type.ret_type), CallableType):
return None
with self.chk.local_type_map():
with self.msg.filter_errors():
arg_type = get_proper_type(self.accept(args[0], type_context=None))
if isinstance(arg_type, Overloaded):
return arg_type
return None
def handle_decorator_overload_call(
self, callee_type: CallableType, overloaded: Overloaded, ctx: Context
) -> tuple[Type, Type] | None:
"""Type-check application of a generic callable to an overload.
We check call on each individual overload item, and then combine results into a new
overload. This function should be only used if callee_type takes and returns a Callable.
"""
result = []
inferred_args = []
for item in overloaded.items:
arg = TempNode(typ=item)
with self.msg.filter_errors() as err:
item_result, inferred_arg = self.check_call(callee_type, [arg], [ARG_POS], ctx)
if err.has_new_errors():
# This overload doesn't match.
continue
p_item_result = get_proper_type(item_result)
if not isinstance(p_item_result, CallableType):
continue
p_inferred_arg = get_proper_type(inferred_arg)
if not isinstance(p_inferred_arg, CallableType):
continue
inferred_args.append(p_inferred_arg)
result.append(p_item_result)
if not result or not inferred_args:
# None of the overload matched (or overload was initially malformed).
return None
return Overloaded(result), Overloaded(inferred_args)
def check_call_expr_with_callee_type(
self,
callee_type: Type,
e: CallExpr,
callable_name: str | None,
object_type: Type | None,
member: str | None = None,
) -> Type:
"""Type check call expression.
The callee_type should be used as the type of callee expression. In particular,
in case of a union type this can be a particular item of the union, so that we can
apply plugin hooks to each item.
The 'member', 'callable_name' and 'object_type' are only used to call plugin hooks.
If 'callable_name' is None but 'member' is not None (member call), try constructing
'callable_name' using 'object_type' (the base type on which the method is called),
for example 'typing.Mapping.get'.
"""
if callable_name is None and member is not None:
assert object_type is not None
callable_name = self.method_fullname(object_type, member)
object_type = get_proper_type(object_type)
if callable_name:
# Try to refine the call signature using plugin hooks before checking the call.
callee_type = self.transform_callee_type(
callable_name, callee_type, e.args, e.arg_kinds, e, e.arg_names, object_type
)
# Unions are special-cased to allow plugins to act on each item in the union.
elif member is not None and isinstance(object_type, UnionType):
return self.check_union_call_expr(e, object_type, member)
ret_type, callee_type = self.check_call(
callee_type,
e.args,
e.arg_kinds,
e,
e.arg_names,
callable_node=e.callee,
callable_name=callable_name,
object_type=object_type,
)
proper_callee = get_proper_type(callee_type)
if isinstance(e.callee, RefExpr) and isinstance(proper_callee, CallableType):
# Cache it for find_isinstance_check()
if proper_callee.type_guard is not None:
e.callee.type_guard = proper_callee.type_guard
if proper_callee.type_is is not None:
e.callee.type_is = proper_callee.type_is
return ret_type
def check_union_call_expr(self, e: CallExpr, object_type: UnionType, member: str) -> Type:
"""Type check calling a member expression where the base type is a union."""
res: list[Type] = []
for typ in object_type.relevant_items():
# Member access errors are already reported when visiting the member expression.
with self.msg.filter_errors():
item = analyze_member_access(
member,
typ,
e,
is_lvalue=False,
is_super=False,
is_operator=False,
msg=self.msg,
original_type=object_type,
chk=self.chk,
in_literal_context=self.is_literal_context(),
self_type=typ,
)
narrowed = self.narrow_type_from_binder(e.callee, item, skip_non_overlapping=True)
if narrowed is None:
continue
callable_name = self.method_fullname(typ, member)
item_object_type = typ if callable_name else None
res.append(
self.check_call_expr_with_callee_type(narrowed, e, callable_name, item_object_type)
)
return make_simplified_union(res)
def check_call(
self,
callee: Type,
args: list[Expression],
arg_kinds: list[ArgKind],
context: Context,
arg_names: Sequence[str | None] | None = None,
callable_node: Expression | None = None,
callable_name: str | None = None,
object_type: Type | None = None,
original_type: Type | None = None,
) -> tuple[Type, Type]:
"""Type check a call.
Also infer type arguments if the callee is a generic function.
Return (result type, inferred callee type).
Arguments:
callee: type of the called value
args: actual argument expressions
arg_kinds: contains nodes.ARG_* constant for each argument in args
describing whether the argument is positional, *arg, etc.
context: current expression context, used for inference.
arg_names: names of arguments (optional)
callable_node: associate the inferred callable type to this node,
if specified
callable_name: Fully-qualified name of the function/method to call,
or None if unavailable (examples: 'builtins.open', 'typing.Mapping.get')
object_type: If callable_name refers to a method, the type of the object
on which the method is being called
"""
callee = get_proper_type(callee)
if isinstance(callee, CallableType):
if callee.variables:
overloaded = self.is_generic_decorator_overload_call(callee, args)
if overloaded is not None:
# Special casing for inline application of generic callables to overloads.
# Supporting general case would be tricky, but this should cover 95% of cases.
overloaded_result = self.handle_decorator_overload_call(
callee, overloaded, context
)
if overloaded_result is not None:
return overloaded_result
return self.check_callable_call(
callee,
args,
arg_kinds,
context,
arg_names,
callable_node,
callable_name,
object_type,
)
elif isinstance(callee, Overloaded):
return self.check_overload_call(
callee, args, arg_kinds, arg_names, callable_name, object_type, context
)
elif isinstance(callee, AnyType) or not self.chk.in_checked_function():
return self.check_any_type_call(args, callee)
elif isinstance(callee, UnionType):
return self.check_union_call(callee, args, arg_kinds, arg_names, context)
elif isinstance(callee, Instance):
call_function = analyze_member_access(
"__call__",
callee,
context,
is_lvalue=False,
is_super=False,
is_operator=True,
msg=self.msg,
original_type=original_type or callee,
chk=self.chk,
in_literal_context=self.is_literal_context(),
)
callable_name = callee.type.fullname + ".__call__"
# Apply method signature hook, if one exists
call_function = self.transform_callee_type(
callable_name, call_function, args, arg_kinds, context, arg_names, callee
)
result = self.check_call(
call_function,
args,
arg_kinds,
context,
arg_names,
callable_node,
callable_name,
callee,
)
if callable_node:
# check_call() stored "call_function" as the type, which is incorrect.
# Override the type.
self.chk.store_type(callable_node, callee)
return result
elif isinstance(callee, TypeVarType):
return self.check_call(
callee.upper_bound, args, arg_kinds, context, arg_names, callable_node
)
elif isinstance(callee, TypeType):
item = self.analyze_type_type_callee(callee.item, context)
return self.check_call(item, args, arg_kinds, context, arg_names, callable_node)
elif isinstance(callee, TupleType):
return self.check_call(
tuple_fallback(callee),
args,
arg_kinds,
context,
arg_names,
callable_node,
callable_name,
object_type,
original_type=callee,
)
else:
return self.msg.not_callable(callee, context), AnyType(TypeOfAny.from_error)
def check_callable_call(
self,
callee: CallableType,
args: list[Expression],
arg_kinds: list[ArgKind],
context: Context,
arg_names: Sequence[str | None] | None,
callable_node: Expression | None,
callable_name: str | None,
object_type: Type | None,
) -> tuple[Type, Type]:
"""Type check a call that targets a callable value.
See the docstring of check_call for more information.
"""
# Always unpack **kwargs before checking a call.
callee = callee.with_unpacked_kwargs().with_normalized_var_args()
if callable_name is None and callee.name:
callable_name = callee.name
ret_type = get_proper_type(callee.ret_type)
if callee.is_type_obj() and isinstance(ret_type, Instance):
callable_name = ret_type.type.fullname
if isinstance(callable_node, RefExpr) and callable_node.fullname in ENUM_BASES:
# An Enum() call that failed SemanticAnalyzerPass2.check_enum_call().
return callee.ret_type, callee
if (
callee.is_type_obj()
and callee.type_object().is_protocol
# Exception for Type[...]
and not callee.from_type_type
):
self.chk.fail(
message_registry.CANNOT_INSTANTIATE_PROTOCOL.format(callee.type_object().name),
context,
)
elif (
callee.is_type_obj()
and callee.type_object().is_abstract
# Exception for Type[...]
and not callee.from_type_type
and not callee.type_object().fallback_to_any
):
type = callee.type_object()
# Determine whether the implicitly abstract attributes are functions with
# None-compatible return types.
abstract_attributes: dict[str, bool] = {}
for attr_name, abstract_status in type.abstract_attributes:
if abstract_status == IMPLICITLY_ABSTRACT:
abstract_attributes[attr_name] = self.can_return_none(type, attr_name)
else:
abstract_attributes[attr_name] = False
self.msg.cannot_instantiate_abstract_class(
callee.type_object().name, abstract_attributes, context
)
var_arg = callee.var_arg()
if var_arg and isinstance(var_arg.typ, UnpackType):
# It is hard to support multiple variadic unpacks (except for old-style *args: int),
# fail gracefully to avoid crashes later.
seen_unpack = False
for arg, arg_kind in zip(args, arg_kinds):
if arg_kind != ARG_STAR:
continue
arg_type = get_proper_type(self.accept(arg))
if not isinstance(arg_type, TupleType) or any(
isinstance(t, UnpackType) for t in arg_type.items
):
if seen_unpack:
self.msg.fail(
"Passing multiple variadic unpacks in a call is not supported",
context,
code=codes.CALL_ARG,
)
return AnyType(TypeOfAny.from_error), callee
seen_unpack = True
formal_to_actual = map_actuals_to_formals(
arg_kinds,
arg_names,
callee.arg_kinds,
callee.arg_names,
lambda i: self.accept(args[i]),
)
# This is tricky: return type may contain its own type variables, like in
# def [S] (S) -> def [T] (T) -> tuple[S, T], so we need to update their ids
# to avoid possible id clashes if this call itself appears in a generic
# function body.
ret_type = get_proper_type(callee.ret_type)
if isinstance(ret_type, CallableType) and ret_type.variables:
fresh_ret_type = freshen_all_functions_type_vars(callee.ret_type)
freeze_all_type_vars(fresh_ret_type)
callee = callee.copy_modified(ret_type=fresh_ret_type)
if callee.is_generic():
need_refresh = any(
isinstance(v, (ParamSpecType, TypeVarTupleType)) for v in callee.variables
)
callee = freshen_function_type_vars(callee)
callee = self.infer_function_type_arguments_using_context(callee, context)
if need_refresh:
# Argument kinds etc. may have changed due to
# ParamSpec or TypeVarTuple variables being replaced with an arbitrary
# number of arguments; recalculate actual-to-formal map
formal_to_actual = map_actuals_to_formals(
arg_kinds,
arg_names,
callee.arg_kinds,
callee.arg_names,
lambda i: self.accept(args[i]),
)
callee = self.infer_function_type_arguments(
callee, args, arg_kinds, arg_names, formal_to_actual, need_refresh, context
)
if need_refresh:
formal_to_actual = map_actuals_to_formals(
arg_kinds,
arg_names,
callee.arg_kinds,
callee.arg_names,
lambda i: self.accept(args[i]),
)
param_spec = callee.param_spec()
if (
param_spec is not None
and arg_kinds == [ARG_STAR, ARG_STAR2]
and len(formal_to_actual) == 2
):
arg1 = self.accept(args[0])
arg2 = self.accept(args[1])
if (
isinstance(arg1, ParamSpecType)
and isinstance(arg2, ParamSpecType)
and arg1.flavor == ParamSpecFlavor.ARGS
and arg2.flavor == ParamSpecFlavor.KWARGS
and arg1.id == arg2.id == param_spec.id
):
return callee.ret_type, callee
arg_types = self.infer_arg_types_in_context(callee, args, arg_kinds, formal_to_actual)
self.check_argument_count(
callee,
arg_types,
arg_kinds,
arg_names,
formal_to_actual,
context,
object_type,
callable_name,
)
self.check_argument_types(
arg_types, arg_kinds, args, callee, formal_to_actual, context, object_type=object_type
)
if (
callee.is_type_obj()
and (len(arg_types) == 1)
and is_equivalent(callee.ret_type, self.named_type("builtins.type"))
):
callee = callee.copy_modified(ret_type=TypeType.make_normalized(arg_types[0]))
if callable_node:
# Store the inferred callable type.
self.chk.store_type(callable_node, callee)
if callable_name and (
(object_type is None and self.plugin.get_function_hook(callable_name))
or (object_type is not None and self.plugin.get_method_hook(callable_name))
):
new_ret_type = self.apply_function_plugin(
callee,
arg_kinds,
arg_types,
arg_names,
formal_to_actual,
args,
callable_name,
object_type,
context,
)
callee = callee.copy_modified(ret_type=new_ret_type)
return callee.ret_type, callee
def can_return_none(self, type: TypeInfo, attr_name: str) -> bool:
"""Is the given attribute a method with a None-compatible return type?
Overloads are only checked if there is an implementation.
"""
if not state.strict_optional:
# If strict-optional is not set, is_subtype(NoneType(), T) is always True.
# So, we cannot do anything useful here in that case.
return False
for base in type.mro:
symnode = base.names.get(attr_name)
if symnode is None:
continue
node = symnode.node
if isinstance(node, OverloadedFuncDef):
node = node.impl
if isinstance(node, Decorator):
node = node.func
if isinstance(node, FuncDef):
if node.type is not None:
assert isinstance(node.type, CallableType)
return is_subtype(NoneType(), node.type.ret_type)
return False
def analyze_type_type_callee(self, item: ProperType, context: Context) -> Type:
"""Analyze the callee X in X(...) where X is Type[item].
Return a Y that we can pass to check_call(Y, ...).
"""
if isinstance(item, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=item)
if isinstance(item, Instance):
res = type_object_type(item.type, self.named_type)
if isinstance(res, CallableType):
res = res.copy_modified(from_type_type=True)
expanded = expand_type_by_instance(res, item)
if isinstance(expanded, CallableType):
# Callee of the form Type[...] should never be generic, only
# proper class objects can be.
expanded = expanded.copy_modified(variables=[])
return expanded
if isinstance(item, UnionType):
return UnionType(
[
self.analyze_type_type_callee(get_proper_type(tp), context)
for tp in item.relevant_items()
],
item.line,
)
if isinstance(item, TypeVarType):
# Pretend we're calling the typevar's upper bound,
# i.e. its constructor (a poor approximation for reality,
# but better than AnyType...), but replace the return type
# with typevar.
callee = self.analyze_type_type_callee(get_proper_type(item.upper_bound), context)
callee = get_proper_type(callee)
if isinstance(callee, CallableType):
callee = callee.copy_modified(ret_type=item)
elif isinstance(callee, Overloaded):
callee = Overloaded([c.copy_modified(ret_type=item) for c in callee.items])
return callee
# We support Type of namedtuples but not of tuples in general
if isinstance(item, TupleType) and tuple_fallback(item).type.fullname != "builtins.tuple":
return self.analyze_type_type_callee(tuple_fallback(item), context)
if isinstance(item, TypedDictType):
return self.typeddict_callable_from_context(item)
self.msg.unsupported_type_type(item, context)
return AnyType(TypeOfAny.from_error)
def infer_arg_types_in_empty_context(self, args: list[Expression]) -> list[Type]:
"""Infer argument expression types in an empty context.
In short, we basically recurse on each argument without considering
in what context the argument was called.
"""
res: list[Type] = []
for arg in args:
arg_type = self.accept(arg)
if has_erased_component(arg_type):
res.append(NoneType())
else:
res.append(arg_type)
return res
def infer_more_unions_for_recursive_type(self, type_context: Type) -> bool:
"""Adjust type inference of unions if type context has a recursive type.
Return the old state. The caller must assign it to type_state.infer_unions
afterwards.
This is a hack to better support inference for recursive types.
Note: This is performance-sensitive and must not be a context manager
until mypyc supports them better.
"""
old = type_state.infer_unions
if has_recursive_types(type_context):
type_state.infer_unions = True
return old
def infer_arg_types_in_context(
self,
callee: CallableType,
args: list[Expression],
arg_kinds: list[ArgKind],
formal_to_actual: list[list[int]],
) -> list[Type]:
"""Infer argument expression types using a callable type as context.
For example, if callee argument 2 has type List[int], infer the
argument expression with List[int] type context.
Returns the inferred types of *actual arguments*.
"""
res: list[Type | None] = [None] * len(args)
for i, actuals in enumerate(formal_to_actual):
for ai in actuals:
if not arg_kinds[ai].is_star():
arg_type = callee.arg_types[i]
# When the outer context for a function call is known to be recursive,
# we solve type constraints inferred from arguments using unions instead
# of joins. This is a bit arbitrary, but in practice it works for most
# cases. A cleaner alternative would be to switch to single bin type
# inference, but this is a lot of work.
old = self.infer_more_unions_for_recursive_type(arg_type)
res[ai] = self.accept(args[ai], arg_type)
# We need to manually restore union inference state, ugh.
type_state.infer_unions = old
# Fill in the rest of the argument types.
for i, t in enumerate(res):
if not t:
res[i] = self.accept(args[i])
assert all(tp is not None for tp in res)
return cast(List[Type], res)
def infer_function_type_arguments_using_context(
self, callable: CallableType, error_context: Context
) -> CallableType:
"""Unify callable return type to type context to infer type vars.
For example, if the return type is set[t] where 't' is a type variable
of callable, and if the context is set[int], return callable modified
by substituting 't' with 'int'.
"""
ctx = self.type_context[-1]
if not ctx:
return callable
# The return type may have references to type metavariables that
# we are inferring right now. We must consider them as indeterminate
# and they are not potential results; thus we replace them with the
# special ErasedType type. On the other hand, class type variables are
# valid results.
erased_ctx = replace_meta_vars(ctx, ErasedType())
ret_type = callable.ret_type
if is_overlapping_none(ret_type) and is_overlapping_none(ctx):
# If both the context and the return type are optional, unwrap the optional,
# since in 99% cases this is what a user expects. In other words, we replace
# Optional[T] <: Optional[int]
# with
# T <: int
# while the former would infer T <: Optional[int].
ret_type = remove_optional(ret_type)
erased_ctx = remove_optional(erased_ctx)
#
# TODO: Instead of this hack and the one below, we need to use outer and
# inner contexts at the same time. This is however not easy because of two
# reasons:
# * We need to support constraints like [1 <: 2, 2 <: X], i.e. with variables
# on both sides. (This is not too hard.)
# * We need to update all the inference "infrastructure", so that all
# variables in an expression are inferred at the same time.
# (And this is hard, also we need to be careful with lambdas that require
# two passes.)
if isinstance(ret_type, TypeVarType):
# Another special case: the return type is a type variable. If it's unrestricted,
# we could infer a too general type for the type variable if we use context,
# and this could result in confusing and spurious type errors elsewhere.
#
# So we give up and just use function arguments for type inference, with just two
# exceptions:
#
# 1. If the context is a generic instance type, actually use it as context, as
# this *seems* to usually be the reasonable thing to do.
#
# See also github issues #462 and #360.
#
# 2. If the context is some literal type, we want to "propagate" that information
# down so that we infer a more precise type for literal expressions. For example,
# the expression `3` normally has an inferred type of `builtins.int`: but if it's
# in a literal context like below, we want it to infer `Literal[3]` instead.
#
# def expects_literal(x: Literal[3]) -> None: pass
# def identity(x: T) -> T: return x
#
# expects_literal(identity(3)) # Should type-check
# TODO: we may want to add similar exception if all arguments are lambdas, since
# in this case external context is almost everything we have.
if not is_generic_instance(ctx) and not is_literal_type_like(ctx):
return callable.copy_modified()
args = infer_type_arguments(
callable.variables, ret_type, erased_ctx, skip_unsatisfied=True
)
# Only substitute non-Uninhabited and non-erased types.
new_args: list[Type | None] = []
for arg in args:
if has_uninhabited_component(arg) or has_erased_component(arg):
new_args.append(None)
else:
new_args.append(arg)
# Don't show errors after we have only used the outer context for inference.
# We will use argument context to infer more variables.
return self.apply_generic_arguments(
callable, new_args, error_context, skip_unsatisfied=True
)
def infer_function_type_arguments(
self,
callee_type: CallableType,
args: list[Expression],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
formal_to_actual: list[list[int]],
need_refresh: bool,
context: Context,
) -> CallableType:
"""Infer the type arguments for a generic callee type.
Infer based on the types of arguments.
Return a derived callable type that has the arguments applied.
"""
if self.chk.in_checked_function():
# Disable type errors during type inference. There may be errors
# due to partial available context information at this time, but
# these errors can be safely ignored as the arguments will be
# inferred again later.
with self.msg.filter_errors():
arg_types = self.infer_arg_types_in_context(
callee_type, args, arg_kinds, formal_to_actual
)
arg_pass_nums = self.get_arg_infer_passes(
callee_type, args, arg_types, formal_to_actual, len(args)
)
pass1_args: list[Type | None] = []
for i, arg in enumerate(arg_types):
if arg_pass_nums[i] > 1:
pass1_args.append(None)
else:
pass1_args.append(arg)
inferred_args, _ = infer_function_type_arguments(
callee_type,
pass1_args,
arg_kinds,
arg_names,
formal_to_actual,
context=self.argument_infer_context(),
strict=self.chk.in_checked_function(),
)
if 2 in arg_pass_nums:
# Second pass of type inference.
(callee_type, inferred_args) = self.infer_function_type_arguments_pass2(
callee_type,
args,
arg_kinds,
arg_names,
formal_to_actual,
inferred_args,
need_refresh,
context,
)
if (
callee_type.special_sig == "dict"
and len(inferred_args) == 2
and (ARG_NAMED in arg_kinds or ARG_STAR2 in arg_kinds)
):
# HACK: Infer str key type for dict(...) with keyword args. The type system
# can't represent this so we special case it, as this is a pretty common
# thing. This doesn't quite work with all possible subclasses of dict
# if they shuffle type variables around, as we assume that there is a 1-1
# correspondence with dict type variables. This is a marginal issue and
# a little tricky to fix so it's left unfixed for now.
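            # For example, ``dict(x=1, y=2)`` is inferred as ``dict[str, int]``
            # even though the key type is not otherwise constrained by the call.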
first_arg = get_proper_type(inferred_args[0])
if isinstance(first_arg, (NoneType, UninhabitedType)):
inferred_args[0] = self.named_type("builtins.str")
elif not first_arg or not is_subtype(self.named_type("builtins.str"), first_arg):
self.chk.fail(message_registry.KEYWORD_ARGUMENT_REQUIRES_STR_KEY_TYPE, context)
if not self.chk.options.old_type_inference and any(
a is None
or isinstance(get_proper_type(a), UninhabitedType)
or set(get_type_vars(a)) & set(callee_type.variables)
for a in inferred_args
):
if need_refresh:
# Technically we need to refresh formal_to_actual after *each* inference pass,
# since each pass can expand ParamSpec or TypeVarTuple. Although such situations
# are very rare, not doing this can cause crashes.
formal_to_actual = map_actuals_to_formals(
arg_kinds,
arg_names,
callee_type.arg_kinds,
callee_type.arg_names,
lambda a: self.accept(args[a]),
)
# If the regular two-phase inference didn't work, try inferring type
# variables while allowing for polymorphic solutions, i.e. for solutions
# potentially involving free variables.
# TODO: support the similar inference for return type context.
poly_inferred_args, free_vars = infer_function_type_arguments(
callee_type,
arg_types,
arg_kinds,
arg_names,
formal_to_actual,
context=self.argument_infer_context(),
strict=self.chk.in_checked_function(),
allow_polymorphic=True,
)
poly_callee_type = self.apply_generic_arguments(
callee_type, poly_inferred_args, context
)
# Try applying inferred polymorphic type if possible, e.g. Callable[[T], T] can
# be interpreted as def [T] (T) -> T, but dict[T, T] cannot be expressed.
applied = applytype.apply_poly(poly_callee_type, free_vars)
if applied is not None and all(
a is not None and not isinstance(get_proper_type(a), UninhabitedType)
for a in poly_inferred_args
):
freeze_all_type_vars(applied)
return applied
# If it didn't work, erase free variables as uninhabited, to avoid confusing errors.
unknown = UninhabitedType()
unknown.ambiguous = True
inferred_args = [
(
expand_type(
a, {v.id: unknown for v in list(callee_type.variables) + free_vars}
)
if a is not None
else None
)
for a in poly_inferred_args
]
else:
# In dynamically typed functions use implicit 'Any' types for
# type variables.
inferred_args = [AnyType(TypeOfAny.unannotated)] * len(callee_type.variables)
return self.apply_inferred_arguments(callee_type, inferred_args, context)
def infer_function_type_arguments_pass2(
self,
callee_type: CallableType,
args: list[Expression],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
formal_to_actual: list[list[int]],
old_inferred_args: Sequence[Type | None],
need_refresh: bool,
context: Context,
) -> tuple[CallableType, list[Type | None]]:
"""Perform second pass of generic function type argument inference.
The second pass is needed for arguments with types such as Callable[[T], S],
where both T and S are type variables, when the actual argument is a
lambda with inferred types. The idea is to infer the type variable T
in the first pass (based on the types of other arguments). This lets
us infer the argument and return type of the lambda expression and
thus also the type variable S in this second pass.
Return (the callee with type vars applied, inferred actual arg types).
"""
# None or erased types in inferred types mean that there was not enough
# information to infer the argument. Replace them with None values so
# that they are not applied yet below.
inferred_args = list(old_inferred_args)
for i, arg in enumerate(get_proper_types(inferred_args)):
if isinstance(arg, (NoneType, UninhabitedType)) or has_erased_component(arg):
inferred_args[i] = None
callee_type = self.apply_generic_arguments(callee_type, inferred_args, context)
if need_refresh:
formal_to_actual = map_actuals_to_formals(
arg_kinds,
arg_names,
callee_type.arg_kinds,
callee_type.arg_names,
lambda a: self.accept(args[a]),
)
# Same as during first pass, disable type errors (we still have partial context).
with self.msg.filter_errors():
arg_types = self.infer_arg_types_in_context(
callee_type, args, arg_kinds, formal_to_actual
)
inferred_args, _ = infer_function_type_arguments(
callee_type,
arg_types,
arg_kinds,
arg_names,
formal_to_actual,
context=self.argument_infer_context(),
)
return callee_type, inferred_args
def argument_infer_context(self) -> ArgumentInferContext:
return ArgumentInferContext(
self.chk.named_type("typing.Mapping"), self.chk.named_type("typing.Iterable")
)
def get_arg_infer_passes(
self,
callee: CallableType,
args: list[Expression],
arg_types: list[Type],
formal_to_actual: list[list[int]],
num_actuals: int,
) -> list[int]:
"""Return pass numbers for args for two-pass argument type inference.
For each actual, the pass number is either 1 (first pass) or 2 (second
pass).
Two-pass argument type inference primarily lets us infer types of
lambdas more effectively.
"""
res = [1] * num_actuals
for i, arg in enumerate(callee.arg_types):
skip_param_spec = False
p_formal = get_proper_type(callee.arg_types[i])
if isinstance(p_formal, CallableType) and p_formal.param_spec():
for j in formal_to_actual[i]:
p_actual = get_proper_type(arg_types[j])
                    # This is an exception to the usual logic where we put generic Callable
# arguments in the second pass. If we have a non-generic actual, it is
# likely to infer good constraints, for example if we have:
# def run(Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None: ...
# def test(x: int, y: int) -> int: ...
# run(test, 1, 2)
                    # we will use `test` for inference, since it will also allow us to infer
# argument *names* for P <: [x: int, y: int].
if isinstance(p_actual, Instance):
call_method = find_member("__call__", p_actual, p_actual, is_operator=True)
if call_method is not None:
p_actual = get_proper_type(call_method)
if (
isinstance(p_actual, CallableType)
and not p_actual.variables
and not isinstance(args[j], LambdaExpr)
):
skip_param_spec = True
break
if not skip_param_spec and arg.accept(ArgInferSecondPassQuery()):
for j in formal_to_actual[i]:
res[j] = 2
return res
def apply_inferred_arguments(
self, callee_type: CallableType, inferred_args: Sequence[Type | None], context: Context
) -> CallableType:
"""Apply inferred values of type arguments to a generic function.
Inferred_args contains the values of function type arguments.
"""
# Report error if some of the variables could not be solved. In that
# case assume that all variables have type Any to avoid extra
# bogus error messages.
for i, inferred_type in enumerate(inferred_args):
if not inferred_type or has_erased_component(inferred_type):
# Could not infer a non-trivial type for a type variable.
self.msg.could_not_infer_type_arguments(callee_type, i + 1, context)
inferred_args = [AnyType(TypeOfAny.from_error)] * len(inferred_args)
# Apply the inferred types to the function type. In this case the
# return type must be CallableType, since we give the right number of type
# arguments.
return self.apply_generic_arguments(callee_type, inferred_args, context)
def check_argument_count(
self,
callee: CallableType,
actual_types: list[Type],
actual_kinds: list[ArgKind],
actual_names: Sequence[str | None] | None,
formal_to_actual: list[list[int]],
context: Context | None,
object_type: Type | None = None,
callable_name: str | None = None,
) -> bool:
"""Check that there is a value for all required arguments to a function.
Also check that there are no duplicate values for arguments. Report found errors
using 'messages' if it's not None. If 'messages' is given, 'context' must also be given.
        Return False if there were any errors. Otherwise return True.
"""
if context is None:
# Avoid "is None" checks
context = TempNode(AnyType(TypeOfAny.special_form))
# TODO(jukka): We could return as soon as we find an error if messages is None.
# Collect dict of all actual arguments matched to formal arguments, with occurrence count
all_actuals: dict[int, int] = {}
for actuals in formal_to_actual:
for a in actuals:
all_actuals[a] = all_actuals.get(a, 0) + 1
ok, is_unexpected_arg_error = self.check_for_extra_actual_arguments(
callee, actual_types, actual_kinds, actual_names, all_actuals, context
)
# Check for too many or few values for formals.
for i, kind in enumerate(callee.arg_kinds):
if kind.is_required() and not formal_to_actual[i] and not is_unexpected_arg_error:
# No actual for a mandatory formal
if kind.is_positional():
self.msg.too_few_arguments(callee, context, actual_names)
if object_type and callable_name and "." in callable_name:
self.missing_classvar_callable_note(object_type, callable_name, context)
else:
argname = callee.arg_names[i] or "?"
self.msg.missing_named_argument(callee, context, argname)
ok = False
elif not kind.is_star() and is_duplicate_mapping(
formal_to_actual[i], actual_types, actual_kinds
):
if self.chk.in_checked_function() or isinstance(
get_proper_type(actual_types[formal_to_actual[i][0]]), TupleType
):
self.msg.duplicate_argument_value(callee, i, context)
ok = False
elif (
kind.is_named()
and formal_to_actual[i]
and actual_kinds[formal_to_actual[i][0]] not in [nodes.ARG_NAMED, nodes.ARG_STAR2]
):
# Positional argument when expecting a keyword argument.
self.msg.too_many_positional_arguments(callee, context)
ok = False
elif callee.param_spec() is not None and not formal_to_actual[i]:
self.msg.too_few_arguments(callee, context, actual_names)
ok = False
return ok
def check_for_extra_actual_arguments(
self,
callee: CallableType,
actual_types: list[Type],
actual_kinds: list[ArgKind],
actual_names: Sequence[str | None] | None,
all_actuals: dict[int, int],
context: Context,
) -> tuple[bool, bool]:
"""Check for extra actual arguments.
Return tuple (was everything ok,
was there an extra keyword argument error [used to avoid duplicate errors]).
"""
is_unexpected_arg_error = False # Keep track of errors to avoid duplicate errors
ok = True # False if we've found any error
for i, kind in enumerate(actual_kinds):
if (
i not in all_actuals
and
                # We accept iterables other than tuple (including Any) as star
                # arguments because they could be empty, resulting in no arguments.
(kind != nodes.ARG_STAR or is_non_empty_tuple(actual_types[i]))
and
# Accept all types for double-starred arguments, because they could be empty
# dictionaries and we can't tell it from their types
kind != nodes.ARG_STAR2
):
# Extra actual: not matched by a formal argument.
ok = False
if kind != nodes.ARG_NAMED:
self.msg.too_many_arguments(callee, context)
else:
assert actual_names, "Internal error: named kinds without names given"
act_name = actual_names[i]
assert act_name is not None
act_type = actual_types[i]
self.msg.unexpected_keyword_argument(callee, act_name, act_type, context)
is_unexpected_arg_error = True
elif (
kind == nodes.ARG_STAR and nodes.ARG_STAR not in callee.arg_kinds
) or kind == nodes.ARG_STAR2:
actual_type = get_proper_type(actual_types[i])
if isinstance(actual_type, (TupleType, TypedDictType)):
if all_actuals.get(i, 0) < len(actual_type.items):
# Too many tuple/dict items as some did not match.
if kind != nodes.ARG_STAR2 or not isinstance(actual_type, TypedDictType):
self.msg.too_many_arguments(callee, context)
else:
self.msg.too_many_arguments_from_typed_dict(
callee, actual_type, context
)
is_unexpected_arg_error = True
ok = False
# *args/**kwargs can be applied even if the function takes a fixed
# number of positional arguments. This may succeed at runtime.
return ok, is_unexpected_arg_error
def missing_classvar_callable_note(
self, object_type: Type, callable_name: str, context: Context
) -> None:
if isinstance(object_type, ProperType) and isinstance(object_type, Instance):
_, var_name = callable_name.rsplit(".", maxsplit=1)
node = object_type.type.get(var_name)
if node is not None and isinstance(node.node, Var):
if not node.node.is_inferred and not node.node.is_classvar:
self.msg.note(
f'"{var_name}" is considered instance variable,'
" to make it class variable use ClassVar[...]",
context,
)
def check_argument_types(
self,
arg_types: list[Type],
arg_kinds: list[ArgKind],
args: list[Expression],
callee: CallableType,
formal_to_actual: list[list[int]],
context: Context,
check_arg: ArgChecker | None = None,
object_type: Type | None = None,
) -> None:
"""Check argument types against a callable type.
Report errors if the argument types are not compatible.
The check_call docstring describes some of the arguments.
"""
check_arg = check_arg or self.check_arg
# Keep track of consumed tuple *arg items.
mapper = ArgTypeExpander(self.argument_infer_context())
for i, actuals in enumerate(formal_to_actual):
orig_callee_arg_type = get_proper_type(callee.arg_types[i])
# Checking the case that we have more than one item but the first argument
# is an unpack, so this would be something like:
# [Tuple[Unpack[Ts]], int]
#
            # In this case we have to check everything together; we do this by re-unifying
            # the suffixes to the tuple, e.g. a single actual like
# Tuple[Unpack[Ts], int]
expanded_tuple = False
actual_kinds = [arg_kinds[a] for a in actuals]
if len(actuals) > 1:
p_actual_type = get_proper_type(arg_types[actuals[0]])
if (
isinstance(p_actual_type, TupleType)
and len(p_actual_type.items) == 1
and isinstance(p_actual_type.items[0], UnpackType)
and actual_kinds == [nodes.ARG_STAR] + [nodes.ARG_POS] * (len(actuals) - 1)
):
actual_types = [p_actual_type.items[0]] + [arg_types[a] for a in actuals[1:]]
if isinstance(orig_callee_arg_type, UnpackType):
p_callee_type = get_proper_type(orig_callee_arg_type.type)
if isinstance(p_callee_type, TupleType):
assert p_callee_type.items
callee_arg_types = p_callee_type.items
callee_arg_kinds = [nodes.ARG_STAR] + [nodes.ARG_POS] * (
len(p_callee_type.items) - 1
)
expanded_tuple = True
if not expanded_tuple:
actual_types = [arg_types[a] for a in actuals]
if isinstance(orig_callee_arg_type, UnpackType):
unpacked_type = get_proper_type(orig_callee_arg_type.type)
if isinstance(unpacked_type, TupleType):
inner_unpack_index = find_unpack_in_list(unpacked_type.items)
if inner_unpack_index is None:
callee_arg_types = unpacked_type.items
callee_arg_kinds = [ARG_POS] * len(actuals)
else:
inner_unpack = unpacked_type.items[inner_unpack_index]
assert isinstance(inner_unpack, UnpackType)
inner_unpacked_type = get_proper_type(inner_unpack.type)
if isinstance(inner_unpacked_type, TypeVarTupleType):
# This branch mimics the expanded_tuple case above but for
# the case where caller passed a single * unpacked tuple argument.
callee_arg_types = unpacked_type.items
callee_arg_kinds = [
ARG_POS if i != inner_unpack_index else ARG_STAR
for i in range(len(unpacked_type.items))
]
else:
# We assume heterogeneous tuples are desugared earlier.
assert isinstance(inner_unpacked_type, Instance)
assert inner_unpacked_type.type.fullname == "builtins.tuple"
callee_arg_types = (
unpacked_type.items[:inner_unpack_index]
+ [inner_unpacked_type.args[0]]
* (len(actuals) - len(unpacked_type.items) + 1)
+ unpacked_type.items[inner_unpack_index + 1 :]
)
callee_arg_kinds = [ARG_POS] * len(actuals)
elif isinstance(unpacked_type, TypeVarTupleType):
callee_arg_types = [orig_callee_arg_type]
callee_arg_kinds = [ARG_STAR]
else:
assert isinstance(unpacked_type, Instance)
assert unpacked_type.type.fullname == "builtins.tuple"
callee_arg_types = [unpacked_type.args[0]] * len(actuals)
callee_arg_kinds = [ARG_POS] * len(actuals)
else:
callee_arg_types = [orig_callee_arg_type] * len(actuals)
callee_arg_kinds = [callee.arg_kinds[i]] * len(actuals)
assert len(actual_types) == len(actuals) == len(actual_kinds)
if len(callee_arg_types) != len(actual_types):
if len(actual_types) > len(callee_arg_types):
self.chk.msg.too_many_arguments(callee, context)
else:
self.chk.msg.too_few_arguments(callee, context, None)
continue
assert len(callee_arg_types) == len(actual_types)
assert len(callee_arg_types) == len(callee_arg_kinds)
for actual, actual_type, actual_kind, callee_arg_type, callee_arg_kind in zip(
actuals, actual_types, actual_kinds, callee_arg_types, callee_arg_kinds
):
if actual_type is None:
continue # Some kind of error was already reported.
# Check that a *arg is valid as varargs.
if actual_kind == nodes.ARG_STAR and not self.is_valid_var_arg(actual_type):
self.msg.invalid_var_arg(actual_type, context)
if actual_kind == nodes.ARG_STAR2 and not self.is_valid_keyword_var_arg(
actual_type
):
is_mapping = is_subtype(
actual_type, self.chk.named_type("_typeshed.SupportsKeysAndGetItem")
)
self.msg.invalid_keyword_var_arg(actual_type, is_mapping, context)
expanded_actual = mapper.expand_actual_type(
actual_type,
actual_kind,
callee.arg_names[i],
callee_arg_kind,
allow_unpack=isinstance(callee_arg_type, UnpackType),
)
check_arg(
expanded_actual,
actual_type,
actual_kind,
callee_arg_type,
actual + 1,
i + 1,
callee,
object_type,
args[actual],
context,
)
def check_arg(
self,
caller_type: Type,
original_caller_type: Type,
caller_kind: ArgKind,
callee_type: Type,
n: int,
m: int,
callee: CallableType,
object_type: Type | None,
context: Context,
outer_context: Context,
) -> None:
"""Check the type of a single argument in a call."""
caller_type = get_proper_type(caller_type)
original_caller_type = get_proper_type(original_caller_type)
callee_type = get_proper_type(callee_type)
if isinstance(caller_type, DeletedType):
self.msg.deleted_as_rvalue(caller_type, context)
        # Only a non-abstract, non-protocol class can be given where Type[...] is expected...
elif self.has_abstract_type_part(caller_type, callee_type):
self.msg.concrete_only_call(callee_type, context)
elif not is_subtype(caller_type, callee_type, options=self.chk.options):
code = self.msg.incompatible_argument(
n,
m,
callee,
original_caller_type,
caller_kind,
object_type=object_type,
context=context,
outer_context=outer_context,
)
self.msg.incompatible_argument_note(
original_caller_type, callee_type, context, code=code
)
if not self.msg.prefer_simple_messages():
self.chk.check_possible_missing_await(caller_type, callee_type, context, code)
def check_overload_call(
self,
callee: Overloaded,
args: list[Expression],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
callable_name: str | None,
object_type: Type | None,
context: Context,
) -> tuple[Type, Type]:
"""Checks a call to an overloaded function."""
# Normalize unpacked kwargs before checking the call.
callee = callee.with_unpacked_kwargs()
arg_types = self.infer_arg_types_in_empty_context(args)
# Step 1: Filter call targets to remove ones where the argument counts don't match
plausible_targets = self.plausible_overload_call_targets(
arg_types, arg_kinds, arg_names, callee
)
# Step 2: If the arguments contain a union, we try performing union math first,
# instead of picking the first matching overload.
# This is because picking the first overload often ends up being too greedy:
# for example, when we have a fallback alternative that accepts an unrestricted
# typevar. See https://github.com/python/mypy/issues/4063 for related discussion.
erased_targets: list[CallableType] | None = None
unioned_result: tuple[Type, Type] | None = None
# Determine whether we need to encourage union math. This should be generally safe,
# as union math infers better results in the vast majority of cases, but it is very
# computationally intensive.
none_type_var_overlap = self.possible_none_type_var_overlap(arg_types, plausible_targets)
union_interrupted = False # did we try all union combinations?
if any(self.real_union(arg) for arg in arg_types):
try:
with self.msg.filter_errors():
unioned_return = self.union_overload_result(
plausible_targets,
args,
arg_types,
arg_kinds,
arg_names,
callable_name,
object_type,
none_type_var_overlap,
context,
)
except TooManyUnions:
union_interrupted = True
else:
                # Record if we succeeded. Next we need to see whether the normal procedure
# gives a narrower type.
if unioned_return:
returns, inferred_types = zip(*unioned_return)
# Note that we use `combine_function_signatures` instead of just returning
# a union of inferred callables because for example a call
# Union[int -> int, str -> str](Union[int, str]) is invalid and
# we don't want to introduce internal inconsistencies.
unioned_result = (
make_simplified_union(list(returns), context.line, context.column),
self.combine_function_signatures(get_proper_types(inferred_types)),
)
# Step 3: We try checking each branch one-by-one.
inferred_result = self.infer_overload_return_type(
plausible_targets,
args,
arg_types,
arg_kinds,
arg_names,
callable_name,
object_type,
context,
)
        # If any of the checks succeeds, stop early.
if inferred_result is not None and unioned_result is not None:
# Both unioned and direct checks succeeded, choose the more precise type.
if (
is_subtype(inferred_result[0], unioned_result[0])
and not isinstance(get_proper_type(inferred_result[0]), AnyType)
and not none_type_var_overlap
):
return inferred_result
return unioned_result
elif unioned_result is not None:
return unioned_result
elif inferred_result is not None:
return inferred_result
# Step 4: Failure. At this point, we know there is no match. We fall back to trying
# to find a somewhat plausible overload target using the erased types
# so we can produce a nice error message.
#
# For example, suppose the user passes a value of type 'List[str]' into an
# overload with signatures f(x: int) -> int and f(x: List[int]) -> List[int].
#
# Neither alternative matches, but we can guess the user probably wants the
# second one.
erased_targets = self.overload_erased_call_targets(
plausible_targets, arg_types, arg_kinds, arg_names, args, context
)
        # Step 5: We try to infer a second-best alternative if possible. If not, fall back
# to using 'Any'.
if len(erased_targets) > 0:
# Pick the first plausible erased target as the fallback
# TODO: Adjust the error message here to make it clear there was no match.
# In order to do this, we need to find a clean way of associating
# a note with whatever error message 'self.check_call' will generate.
# In particular, the note's line and column numbers need to be the same
# as the error's.
target: Type = erased_targets[0]
else:
# There was no plausible match: give up
target = AnyType(TypeOfAny.from_error)
if not is_operator_method(callable_name):
code = None
else:
code = codes.OPERATOR
self.msg.no_variant_matches_arguments(callee, arg_types, context, code=code)
result = self.check_call(
target,
args,
arg_kinds,
context,
arg_names,
callable_name=callable_name,
object_type=object_type,
)
# Do not show the extra error if the union math was forced.
if union_interrupted and not none_type_var_overlap:
self.chk.fail(message_registry.TOO_MANY_UNION_COMBINATIONS, context)
return result
def plausible_overload_call_targets(
self,
arg_types: list[Type],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
overload: Overloaded,
) -> list[CallableType]:
"""Returns all overload call targets that having matching argument counts.
If the given args contains a star-arg (*arg or **kwarg argument, including
ParamSpec), this method will ensure all star-arg overloads appear at the start
of the list, instead of their usual location.
The only exception is if the starred argument is something like a Tuple or a
NamedTuple, which has a definitive "shape". If so, we don't move the corresponding
alternative to the front since we can infer a more precise match using the original
order."""
def has_shape(typ: Type) -> bool:
typ = get_proper_type(typ)
return isinstance(typ, (TupleType, TypedDictType)) or (
isinstance(typ, Instance) and typ.type.is_named_tuple
)
matches: list[CallableType] = []
star_matches: list[CallableType] = []
args_have_var_arg = False
args_have_kw_arg = False
for kind, typ in zip(arg_kinds, arg_types):
if kind == ARG_STAR and not has_shape(typ):
args_have_var_arg = True
if kind == ARG_STAR2 and not has_shape(typ):
args_have_kw_arg = True
for typ in overload.items:
formal_to_actual = map_actuals_to_formals(
arg_kinds, arg_names, typ.arg_kinds, typ.arg_names, lambda i: arg_types[i]
)
with self.msg.filter_errors():
if typ.param_spec() is not None:
# ParamSpec can be expanded in a lot of different ways. We may try
# to expand it here instead, but picking an impossible overload
# is safe: it will be filtered out later.
star_matches.append(typ)
elif self.check_argument_count(
typ, arg_types, arg_kinds, arg_names, formal_to_actual, None
):
if args_have_var_arg and typ.is_var_arg:
star_matches.append(typ)
elif args_have_kw_arg and typ.is_kw_arg:
star_matches.append(typ)
else:
matches.append(typ)
return star_matches + matches
def infer_overload_return_type(
self,
plausible_targets: list[CallableType],
args: list[Expression],
arg_types: list[Type],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
callable_name: str | None,
object_type: Type | None,
context: Context,
) -> tuple[Type, Type] | None:
"""Attempts to find the first matching callable from the given list.
If a match is found, returns a tuple containing the result type and the inferred
callee type. (This tuple is meant to be eventually returned by check_call.)
If multiple targets match due to ambiguous Any parameters, returns (AnyType, AnyType).
If no targets match, returns None.
Assumes all of the given targets have argument counts compatible with the caller.
"""
matches: list[CallableType] = []
return_types: list[Type] = []
inferred_types: list[Type] = []
args_contain_any = any(map(has_any_type, arg_types))
type_maps: list[dict[Expression, Type]] = []
for typ in plausible_targets:
assert self.msg is self.chk.msg
with self.msg.filter_errors() as w:
with self.chk.local_type_map() as m:
ret_type, infer_type = self.check_call(
callee=typ,
args=args,
arg_kinds=arg_kinds,
arg_names=arg_names,
context=context,
callable_name=callable_name,
object_type=object_type,
)
is_match = not w.has_new_errors()
if is_match:
# Return early if possible; otherwise record info, so we can
# check for ambiguity due to 'Any' below.
if not args_contain_any:
self.chk.store_types(m)
return ret_type, infer_type
p_infer_type = get_proper_type(infer_type)
if isinstance(p_infer_type, CallableType):
# Prefer inferred types if possible, this will avoid false triggers for
# Any-ambiguity caused by arguments with Any passed to generic overloads.
matches.append(p_infer_type)
else:
matches.append(typ)
return_types.append(ret_type)
inferred_types.append(infer_type)
type_maps.append(m)
if not matches:
return None
elif any_causes_overload_ambiguity(matches, return_types, arg_types, arg_kinds, arg_names):
# An argument of type or containing the type 'Any' caused ambiguity.
# We try returning a precise type if we can. If not, we give up and just return 'Any'.
if all_same_types(return_types):
self.chk.store_types(type_maps[0])
return return_types[0], inferred_types[0]
elif all_same_types([erase_type(typ) for typ in return_types]):
self.chk.store_types(type_maps[0])
return erase_type(return_types[0]), erase_type(inferred_types[0])
else:
return self.check_call(
callee=AnyType(TypeOfAny.special_form),
args=args,
arg_kinds=arg_kinds,
arg_names=arg_names,
context=context,
callable_name=callable_name,
object_type=object_type,
)
else:
# Success! No ambiguity; return the first match.
self.chk.store_types(type_maps[0])
return return_types[0], inferred_types[0]
def overload_erased_call_targets(
self,
plausible_targets: list[CallableType],
arg_types: list[Type],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
args: list[Expression],
context: Context,
) -> list[CallableType]:
"""Returns a list of all targets that match the caller after erasing types.
Assumes all of the given targets have argument counts compatible with the caller.
"""
matches: list[CallableType] = []
for typ in plausible_targets:
if self.erased_signature_similarity(
arg_types, arg_kinds, arg_names, args, typ, context
):
matches.append(typ)
return matches
def possible_none_type_var_overlap(
self, arg_types: list[Type], plausible_targets: list[CallableType]
) -> bool:
"""Heuristic to determine whether we need to try forcing union math.
This is needed to avoid greedy type variable match in situations like this:
@overload
def foo(x: None) -> None: ...
@overload
def foo(x: T) -> list[T]: ...
x: int | None
foo(x)
we want this call to infer list[int] | None, not list[int | None].
"""
if not plausible_targets or not arg_types:
return False
has_optional_arg = False
for arg_type in get_proper_types(arg_types):
if not isinstance(arg_type, UnionType):
continue
for item in get_proper_types(arg_type.items):
if isinstance(item, NoneType):
has_optional_arg = True
break
if not has_optional_arg:
return False
min_prefix = min(len(c.arg_types) for c in plausible_targets)
for i in range(min_prefix):
if any(
isinstance(get_proper_type(c.arg_types[i]), NoneType) for c in plausible_targets
) and any(
isinstance(get_proper_type(c.arg_types[i]), TypeVarType) for c in plausible_targets
):
return True
return False
def union_overload_result(
self,
plausible_targets: list[CallableType],
args: list[Expression],
arg_types: list[Type],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
callable_name: str | None,
object_type: Type | None,
none_type_var_overlap: bool,
context: Context,
level: int = 0,
) -> list[tuple[Type, Type]] | None:
"""Accepts a list of overload signatures and attempts to match calls by destructuring
the first union.
        Return a list of (<return type>, <inferred variant type>) if the call succeeds for
        every item of the destructured union. Returns None if there is no match.
"""
# Step 1: If we are already too deep, then stop immediately. Otherwise mypy might
        # hang for a long time because of a weird overload call. The caller will get
# the exception and generate an appropriate note message, if needed.
if level >= MAX_UNIONS:
raise TooManyUnions
# Step 2: Find position of the first union in arguments. Return the normal inferred
# type if no more unions left.
for idx, typ in enumerate(arg_types):
if self.real_union(typ):
break
else:
# No unions in args, just fall back to normal inference
with self.type_overrides_set(args, arg_types):
res = self.infer_overload_return_type(
plausible_targets,
args,
arg_types,
arg_kinds,
arg_names,
callable_name,
object_type,
context,
)
if res is not None:
return [res]
return None
# Step 3: Try a direct match before splitting to avoid unnecessary union splits
# and save performance.
if not none_type_var_overlap:
with self.type_overrides_set(args, arg_types):
direct = self.infer_overload_return_type(
plausible_targets,
args,
arg_types,
arg_kinds,
arg_names,
callable_name,
object_type,
context,
)
if direct is not None and not isinstance(
get_proper_type(direct[0]), (UnionType, AnyType)
):
                # We only return non-union results early, to avoid a greedy match.
return [direct]
# Step 4: Split the first remaining union type in arguments into items and
# try to match each item individually (recursive).
first_union = get_proper_type(arg_types[idx])
assert isinstance(first_union, UnionType)
res_items = []
for item in first_union.relevant_items():
new_arg_types = arg_types.copy()
new_arg_types[idx] = item
sub_result = self.union_overload_result(
plausible_targets,
args,
new_arg_types,
arg_kinds,
arg_names,
callable_name,
object_type,
none_type_var_overlap,
context,
level + 1,
)
if sub_result is not None:
res_items.extend(sub_result)
else:
                # Some item doesn't match; return early.
return None
# Step 5: If splitting succeeded, then filter out duplicate items before returning.
seen: set[tuple[Type, Type]] = set()
result = []
for pair in res_items:
if pair not in seen:
seen.add(pair)
result.append(pair)
return result
def real_union(self, typ: Type) -> bool:
typ = get_proper_type(typ)
return isinstance(typ, UnionType) and len(typ.relevant_items()) > 1
@contextmanager
def type_overrides_set(
self, exprs: Sequence[Expression], overrides: Sequence[Type]
) -> Iterator[None]:
"""Set _temporary_ type overrides for given expressions."""
assert len(exprs) == len(overrides)
for expr, typ in zip(exprs, overrides):
self.type_overrides[expr] = typ
try:
yield
finally:
for expr in exprs:
del self.type_overrides[expr]
def combine_function_signatures(self, types: list[ProperType]) -> AnyType | CallableType:
"""Accepts a list of function signatures and attempts to combine them together into a
new CallableType consisting of the union of all of the given arguments and return types.
If there is at least one non-callable type, return Any (this can happen if there is
an ambiguity because of Any in arguments).
"""
assert types, "Trying to merge no callables"
if not all(isinstance(c, CallableType) for c in types):
return AnyType(TypeOfAny.special_form)
callables = cast("list[CallableType]", types)
if len(callables) == 1:
return callables[0]
# Note: we are assuming here that if a user uses some TypeVar 'T' in
# two different functions, they meant for that TypeVar to mean the
# same thing.
#
# This function will make sure that all instances of that TypeVar 'T'
# refer to the same underlying TypeVarType objects to simplify the union-ing
# logic below.
#
# (If the user did *not* mean for 'T' to be consistently bound to the
# same type in their overloads, well, their code is probably too
# confusing and ought to be re-written anyways.)
callables, variables = merge_typevars_in_callables_by_name(callables)
new_args: list[list[Type]] = [[] for _ in range(len(callables[0].arg_types))]
new_kinds = list(callables[0].arg_kinds)
new_returns: list[Type] = []
too_complex = False
for target in callables:
# We fall back to Callable[..., Union[<returns>]] if the functions do not have
# the exact same signature. The only exception is if one arg is optional and
# the other is positional: in that case, we continue unioning (and expect a
# positional arg).
# TODO: Enhance the merging logic to handle a wider variety of signatures.
if len(new_kinds) != len(target.arg_kinds):
too_complex = True
break
for i, (new_kind, target_kind) in enumerate(zip(new_kinds, target.arg_kinds)):
if new_kind == target_kind:
continue
elif new_kind.is_positional() and target_kind.is_positional():
new_kinds[i] = ARG_POS
else:
too_complex = True
break
if too_complex:
break # outer loop
for i, arg in enumerate(target.arg_types):
new_args[i].append(arg)
new_returns.append(target.ret_type)
union_return = make_simplified_union(new_returns)
if too_complex:
any = AnyType(TypeOfAny.special_form)
return callables[0].copy_modified(
arg_types=[any, any],
arg_kinds=[ARG_STAR, ARG_STAR2],
arg_names=[None, None],
ret_type=union_return,
variables=variables,
implicit=True,
)
final_args = []
for args_list in new_args:
new_type = make_simplified_union(args_list)
final_args.append(new_type)
return callables[0].copy_modified(
arg_types=final_args,
arg_kinds=new_kinds,
ret_type=union_return,
variables=variables,
implicit=True,
)
def erased_signature_similarity(
self,
arg_types: list[Type],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
args: list[Expression],
callee: CallableType,
context: Context,
) -> bool:
"""Determine whether arguments could match the signature at runtime, after
erasing types."""
formal_to_actual = map_actuals_to_formals(
arg_kinds, arg_names, callee.arg_kinds, callee.arg_names, lambda i: arg_types[i]
)
with self.msg.filter_errors():
if not self.check_argument_count(
callee, arg_types, arg_kinds, arg_names, formal_to_actual, None
):
# Too few or many arguments -> no match.
return False
def check_arg(
caller_type: Type,
            original_caller_type: Type,
caller_kind: ArgKind,
callee_type: Type,
n: int,
m: int,
callee: CallableType,
object_type: Type | None,
context: Context,
outer_context: Context,
) -> None:
if not arg_approximate_similarity(caller_type, callee_type):
# No match -- exit early since none of the remaining work can change
# the result.
raise Finished
try:
self.check_argument_types(
arg_types,
arg_kinds,
args,
callee,
formal_to_actual,
context=context,
check_arg=check_arg,
)
return True
except Finished:
return False
def apply_generic_arguments(
self,
callable: CallableType,
types: Sequence[Type | None],
context: Context,
skip_unsatisfied: bool = False,
) -> CallableType:
"""Simple wrapper around mypy.applytype.apply_generic_arguments."""
return applytype.apply_generic_arguments(
callable,
types,
self.msg.incompatible_typevar_value,
context,
skip_unsatisfied=skip_unsatisfied,
)
def check_any_type_call(self, args: list[Expression], callee: Type) -> tuple[Type, Type]:
self.infer_arg_types_in_empty_context(args)
callee = get_proper_type(callee)
if isinstance(callee, AnyType):
return (
AnyType(TypeOfAny.from_another_any, source_any=callee),
AnyType(TypeOfAny.from_another_any, source_any=callee),
)
else:
return AnyType(TypeOfAny.special_form), AnyType(TypeOfAny.special_form)
def check_union_call(
self,
callee: UnionType,
args: list[Expression],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
context: Context,
) -> tuple[Type, Type]:
with self.msg.disable_type_names():
results = [
self.check_call(subtype, args, arg_kinds, context, arg_names)
for subtype in callee.relevant_items()
]
return (make_simplified_union([res[0] for res in results]), callee)
def visit_member_expr(self, e: MemberExpr, is_lvalue: bool = False) -> Type:
"""Visit member expression (of form e.id)."""
self.chk.module_refs.update(extract_refexpr_names(e))
result = self.analyze_ordinary_member_access(e, is_lvalue)
return self.narrow_type_from_binder(e, result)
def analyze_ordinary_member_access(self, e: MemberExpr, is_lvalue: bool) -> Type:
"""Analyse member expression or member lvalue."""
if e.kind is not None:
# This is a reference to a module attribute.
return self.analyze_ref_expr(e)
else:
# This is a reference to a non-module attribute.
original_type = self.accept(e.expr, is_callee=self.is_callee)
base = e.expr
module_symbol_table = None
if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
module_symbol_table = base.node.names
if isinstance(base, RefExpr) and isinstance(base.node, Var):
# This is needed to special case self-types, so we don't need to track
# these flags separately in checkmember.py.
is_self = base.node.is_self or base.node.is_cls
else:
is_self = False
member_type = analyze_member_access(
e.name,
original_type,
e,
is_lvalue=is_lvalue,
is_super=False,
is_operator=False,
msg=self.msg,
original_type=original_type,
chk=self.chk,
in_literal_context=self.is_literal_context(),
module_symbol_table=module_symbol_table,
is_self=is_self,
)
return member_type
def analyze_external_member_access(
self, member: str, base_type: Type, context: Context
) -> Type:
"""Analyse member access that is external, i.e. it cannot
refer to private definitions. Return the result type.
"""
# TODO remove; no private definitions in mypy
return analyze_member_access(
member,
base_type,
context,
is_lvalue=False,
is_super=False,
is_operator=False,
msg=self.msg,
original_type=base_type,
chk=self.chk,
in_literal_context=self.is_literal_context(),
)
def is_literal_context(self) -> bool:
return is_literal_type_like(self.type_context[-1])
def infer_literal_expr_type(self, value: LiteralValue, fallback_name: str) -> Type:
"""Analyzes the given literal expression and determines if we should be
inferring an Instance type, a Literal[...] type, or an Instance that
remembers the original literal. We...
1. ...Infer a normal Instance in most circumstances.
2. ...Infer a Literal[...] if we're in a literal context. For example, if we
were analyzing the "3" in "foo(3)" where "foo" has a signature of
"def foo(Literal[3]) -> None", we'd want to infer that the "3" has a
type of Literal[3] instead of Instance.
3. ...Infer an Instance that remembers the original Literal if we're declaring
a Final variable with an inferred type -- for example, "bar" in "bar: Final = 3"
would be assigned an Instance that remembers it originated from a '3'. See
the comments in Instance's constructor for more details.
"""
typ = self.named_type(fallback_name)
if self.is_literal_context():
return LiteralType(value=value, fallback=typ)
else:
return typ.copy_modified(
last_known_value=LiteralType(
value=value, fallback=typ, line=typ.line, column=typ.column
)
)
def concat_tuples(self, left: TupleType, right: TupleType) -> TupleType:
"""Concatenate two fixed length tuples."""
assert not (find_unpack_in_list(left.items) and find_unpack_in_list(right.items))
return TupleType(
items=left.items + right.items, fallback=self.named_type("builtins.tuple")
)
def visit_int_expr(self, e: IntExpr) -> Type:
"""Type check an integer literal (trivial)."""
return self.infer_literal_expr_type(e.value, "builtins.int")
def visit_str_expr(self, e: StrExpr) -> Type:
"""Type check a string literal (trivial)."""
return self.infer_literal_expr_type(e.value, "builtins.str")
def visit_bytes_expr(self, e: BytesExpr) -> Type:
"""Type check a bytes literal (trivial)."""
return self.infer_literal_expr_type(e.value, "builtins.bytes")
def visit_float_expr(self, e: FloatExpr) -> Type:
"""Type check a float literal (trivial)."""
return self.named_type("builtins.float")
def visit_complex_expr(self, e: ComplexExpr) -> Type:
"""Type check a complex literal."""
return self.named_type("builtins.complex")
def visit_ellipsis(self, e: EllipsisExpr) -> Type:
"""Type check '...'."""
return self.named_type("builtins.ellipsis")
def visit_op_expr(self, e: OpExpr) -> Type:
"""Type check a binary operator expression."""
if e.analyzed:
# It's actually a type expression X | Y.
return self.accept(e.analyzed)
if e.op == "and" or e.op == "or":
return self.check_boolean_op(e, e)
if e.op == "*" and isinstance(e.left, ListExpr):
# Expressions of form [...] * e get special type inference.
return self.check_list_multiply(e)
if e.op == "%":
if isinstance(e.left, BytesExpr):
return self.strfrm_checker.check_str_interpolation(e.left, e.right)
if isinstance(e.left, StrExpr):
return self.strfrm_checker.check_str_interpolation(e.left, e.right)
left_type = self.accept(e.left)
proper_left_type = get_proper_type(left_type)
if isinstance(proper_left_type, TupleType) and e.op == "+":
left_add_method = proper_left_type.partial_fallback.type.get("__add__")
if left_add_method and left_add_method.fullname == "builtins.tuple.__add__":
proper_right_type = get_proper_type(self.accept(e.right))
if isinstance(proper_right_type, TupleType):
right_radd_method = proper_right_type.partial_fallback.type.get("__radd__")
if right_radd_method is None:
# One cannot have two variadic items in the same tuple.
if (
find_unpack_in_list(proper_left_type.items) is None
or find_unpack_in_list(proper_right_type.items) is None
):
return self.concat_tuples(proper_left_type, proper_right_type)
elif (
PRECISE_TUPLE_TYPES in self.chk.options.enable_incomplete_feature
and isinstance(proper_right_type, Instance)
and self.chk.type_is_iterable(proper_right_type)
):
# Handle tuple[X, Y] + tuple[Z, ...] = tuple[X, Y, *tuple[Z, ...]].
right_radd_method = proper_right_type.type.get("__radd__")
if (
right_radd_method is None
and proper_left_type.partial_fallback.type.fullname == "builtins.tuple"
and find_unpack_in_list(proper_left_type.items) is None
):
item_type = self.chk.iterable_item_type(proper_right_type, e)
mapped = self.chk.named_generic_type("builtins.tuple", [item_type])
return proper_left_type.copy_modified(
items=proper_left_type.items + [UnpackType(mapped)]
)
use_reverse: UseReverse = USE_REVERSE_DEFAULT
if e.op == "|":
if is_named_instance(proper_left_type, "builtins.dict"):
# This is a special case for `dict | TypedDict`.
# 1. Find `dict | TypedDict` case
# 2. Switch `dict.__or__` to `TypedDict.__ror__` (the same from both runtime and typing perspective)
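                # Illustrative example (hypothetical names):
                #     plain: dict[str, object]
                #     td: MovieTypedDict
                #     plain | td  # checked via MovieTypedDict.__ror__ instead of dict.__or__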
proper_right_type = get_proper_type(self.accept(e.right))
if isinstance(proper_right_type, TypedDictType):
use_reverse = USE_REVERSE_ALWAYS
if isinstance(proper_left_type, TypedDictType):
# This is the reverse case: `TypedDict | dict`,
# simply do not allow the reverse checking:
                # do not call `dict.__ror__`.
proper_right_type = get_proper_type(self.accept(e.right))
if is_named_instance(proper_right_type, "builtins.dict"):
use_reverse = USE_REVERSE_NEVER
if PRECISE_TUPLE_TYPES in self.chk.options.enable_incomplete_feature:
# Handle tuple[X, ...] + tuple[Y, Z] = tuple[*tuple[X, ...], Y, Z].
if (
e.op == "+"
and isinstance(proper_left_type, Instance)
and proper_left_type.type.fullname == "builtins.tuple"
):
proper_right_type = get_proper_type(self.accept(e.right))
if (
isinstance(proper_right_type, TupleType)
and proper_right_type.partial_fallback.type.fullname == "builtins.tuple"
and find_unpack_in_list(proper_right_type.items) is None
):
return proper_right_type.copy_modified(
items=[UnpackType(proper_left_type)] + proper_right_type.items
)
if e.op in operators.op_methods:
method = operators.op_methods[e.op]
if use_reverse is UseReverse.DEFAULT or use_reverse is UseReverse.NEVER:
result, method_type = self.check_op(
method,
base_type=left_type,
arg=e.right,
context=e,
allow_reverse=use_reverse is UseReverse.DEFAULT,
)
elif use_reverse is UseReverse.ALWAYS:
result, method_type = self.check_op(
# The reverse operator here gives better error messages:
operators.reverse_op_methods[method],
base_type=self.accept(e.right),
arg=e.left,
context=e,
allow_reverse=False,
)
else:
assert_never(use_reverse)
e.method_type = method_type
return result
else:
raise RuntimeError(f"Unknown operator {e.op}")
def visit_comparison_expr(self, e: ComparisonExpr) -> Type:
"""Type check a comparison expression.
Comparison expressions are type checked consecutive-pair-wise
        That is, 'a < b > c == d' is checked as 'a < b and b > c and c == d'
"""
result: Type | None = None
sub_result: Type
# Check each consecutive operand pair and their operator
for left, right, operator in zip(e.operands, e.operands[1:], e.operators):
left_type = self.accept(left)
if operator == "in" or operator == "not in":
# This case covers both iterables and containers, which have different meanings.
# For a container, the in operator calls the __contains__ method.
# For an iterable, the in operator iterates over the iterable, and compares each item one-by-one.
# We allow `in` for a union of containers and iterables as long as at least one of them matches the
# type of the left operand, as the operation will simply return False if the union's container/iterator
# type doesn't match the left operand.
# If the right operand has partial type, look it up without triggering
# a "Need type annotation ..." message, as it would be noise.
right_type = self.find_partial_type_ref_fast_path(right)
if right_type is None:
right_type = self.accept(right) # Validate the right operand
right_type = get_proper_type(right_type)
item_types: Sequence[Type] = [right_type]
if isinstance(right_type, UnionType):
item_types = list(right_type.relevant_items())
sub_result = self.bool_type()
container_types: list[Type] = []
iterable_types: list[Type] = []
failed_out = False
encountered_partial_type = False
for item_type in item_types:
# Keep track of whether we get type check errors (these won't be reported, they
# are just to verify whether something is valid typing wise).
with self.msg.filter_errors(save_filtered_errors=True) as container_errors:
_, method_type = self.check_method_call_by_name(
method="__contains__",
base_type=item_type,
args=[left],
arg_kinds=[ARG_POS],
context=e,
original_type=right_type,
)
# Container item type for strict type overlap checks. Note: we need to only
# check for nominal type, because a usual "Unsupported operands for in"
# will be reported for types incompatible with __contains__().
# See testCustomContainsCheckStrictEquality for an example.
cont_type = self.chk.analyze_container_item_type(item_type)
if isinstance(item_type, PartialType):
# We don't really know if this is an error or not, so just shut up.
encountered_partial_type = True
elif (
container_errors.has_new_errors()
and
# is_valid_var_arg is True for any Iterable
self.is_valid_var_arg(item_type)
):
# it's not a container, but it is an iterable
with self.msg.filter_errors(save_filtered_errors=True) as iterable_errors:
_, itertype = self.chk.analyze_iterable_item_type_without_expression(
item_type, e
)
if iterable_errors.has_new_errors():
self.msg.add_errors(iterable_errors.filtered_errors())
failed_out = True
else:
method_type = CallableType(
[left_type],
[nodes.ARG_POS],
[None],
self.bool_type(),
self.named_type("builtins.function"),
)
e.method_types.append(method_type)
iterable_types.append(itertype)
elif not container_errors.has_new_errors() and cont_type:
container_types.append(cont_type)
e.method_types.append(method_type)
else:
self.msg.add_errors(container_errors.filtered_errors())
failed_out = True
if not encountered_partial_type and not failed_out:
iterable_type = UnionType.make_union(iterable_types)
if not is_subtype(left_type, iterable_type):
if not container_types:
self.msg.unsupported_operand_types("in", left_type, right_type, e)
else:
container_type = UnionType.make_union(container_types)
if self.dangerous_comparison(
left_type,
container_type,
original_container=right_type,
prefer_literal=False,
):
self.msg.dangerous_comparison(
left_type, container_type, "container", e
)
elif operator in operators.op_methods:
method = operators.op_methods[operator]
with ErrorWatcher(self.msg.errors) as w:
sub_result, method_type = self.check_op(
method, left_type, right, e, allow_reverse=True
)
e.method_types.append(method_type)
# Only show dangerous overlap if there are no other errors. See
# testCustomEqCheckStrictEquality for an example.
if not w.has_new_errors() and operator in ("==", "!="):
right_type = self.accept(right)
if self.dangerous_comparison(left_type, right_type):
# Show the most specific literal types possible
left_type = try_getting_literal(left_type)
right_type = try_getting_literal(right_type)
self.msg.dangerous_comparison(left_type, right_type, "equality", e)
elif operator == "is" or operator == "is not":
right_type = self.accept(right) # validate the right operand
sub_result = self.bool_type()
if self.dangerous_comparison(left_type, right_type):
# Show the most specific literal types possible
left_type = try_getting_literal(left_type)
right_type = try_getting_literal(right_type)
self.msg.dangerous_comparison(left_type, right_type, "identity", e)
e.method_types.append(None)
else:
raise RuntimeError(f"Unknown comparison operator {operator}")
# Determine type of boolean-and of result and sub_result
if result is None:
result = sub_result
else:
result = join.join_types(result, sub_result)
assert result is not None
return result
def find_partial_type_ref_fast_path(self, expr: Expression) -> Type | None:
"""If expression has a partial generic type, return it without additional checks.
In particular, this does not generate an error about a missing annotation.
Otherwise, return None.
"""
if not isinstance(expr, RefExpr):
return None
if isinstance(expr.node, Var):
result = self.analyze_var_ref(expr.node, expr)
if isinstance(result, PartialType) and result.type is not None:
self.chk.store_type(expr, fixup_partial_type(result))
return result
return None
def dangerous_comparison(
self,
left: Type,
right: Type,
*,
original_container: Type | None = None,
seen_types: set[tuple[Type, Type]] | None = None,
prefer_literal: bool = True,
) -> bool:
"""Check for dangerous non-overlapping comparisons like 42 == 'no'.
The original_container is the original container type for 'in' checks
(and None for equality checks).
Rules:
* X and None are overlapping even in strict-optional mode. This is to allow
'assert x is not None' for x defined as 'x = None # type: str' in class body
          (otherwise mypy itself would have a couple dozen errors because of this).
* Optional[X] and Optional[Y] are non-overlapping if X and Y are
          non-overlapping; although technically None is an overlap, such a
          comparison is most likely an error.
* Any overlaps with everything, i.e. always safe.
* Special case: b'abc' in b'cde' is safe.
"""
if not self.chk.options.strict_equality:
return False
if seen_types is None:
seen_types = set()
if (left, right) in seen_types:
return False
seen_types.add((left, right))
left, right = get_proper_types((left, right))
# We suppress the error if there is a custom __eq__() method on either
# side. User defined (or even standard library) classes can define this
# to return True for comparisons between non-overlapping types.
if custom_special_method(left, "__eq__") or custom_special_method(right, "__eq__"):
return False
if prefer_literal:
# Also flag non-overlapping literals in situations like:
# x: Literal['a', 'b']
# if x == 'c':
# ...
left = try_getting_literal(left)
right = try_getting_literal(right)
if self.chk.binder.is_unreachable_warning_suppressed():
# We are inside a function that contains type variables with value restrictions in
# its signature. In this case we just suppress all strict-equality checks to avoid
# false positives for code like:
#
# T = TypeVar('T', str, int)
# def f(x: T) -> T:
# if x == 0:
# ...
# return x
#
            # TODO: find a way of disabling the check only for types resulting from the expansion.
return False
if isinstance(left, NoneType) or isinstance(right, NoneType):
return False
if isinstance(left, UnionType) and isinstance(right, UnionType):
left = remove_optional(left)
right = remove_optional(right)
left, right = get_proper_types((left, right))
if (
original_container
and has_bytes_component(original_container)
and has_bytes_component(left)
):
# We need to special case bytes and bytearray, because 97 in b'abc', b'a' in b'abc',
# b'a' in bytearray(b'abc') etc. all return True (and we want to show the error only
# if the check can _never_ be True).
return False
if isinstance(left, Instance) and isinstance(right, Instance):
# Special case some builtin implementations of AbstractSet.
left_name = left.type.fullname
right_name = right.type.fullname
if (
left_name in OVERLAPPING_TYPES_ALLOWLIST
and right_name in OVERLAPPING_TYPES_ALLOWLIST
):
abstract_set = self.chk.lookup_typeinfo("typing.AbstractSet")
left = map_instance_to_supertype(left, abstract_set)
right = map_instance_to_supertype(right, abstract_set)
return self.dangerous_comparison(
left.args[0], right.args[0], seen_types=seen_types
)
elif left.type.has_base("typing.Mapping") and right.type.has_base("typing.Mapping"):
# Similar to above: Mapping ignores the classes, it just compares items.
abstract_map = self.chk.lookup_typeinfo("typing.Mapping")
left = map_instance_to_supertype(left, abstract_map)
right = map_instance_to_supertype(right, abstract_map)
return self.dangerous_comparison(
left.args[0], right.args[0], seen_types=seen_types
) or self.dangerous_comparison(left.args[1], right.args[1], seen_types=seen_types)
elif left_name in ("builtins.list", "builtins.tuple") and right_name == left_name:
return self.dangerous_comparison(
left.args[0], right.args[0], seen_types=seen_types
)
elif left_name in OVERLAPPING_BYTES_ALLOWLIST and right_name in (
OVERLAPPING_BYTES_ALLOWLIST
):
return False
if isinstance(left, LiteralType) and isinstance(right, LiteralType):
if isinstance(left.value, bool) and isinstance(right.value, bool):
# Comparing different booleans is not dangerous.
return False
return not is_overlapping_types(left, right, ignore_promotions=False)
def check_method_call_by_name(
self,
method: str,
base_type: Type,
args: list[Expression],
arg_kinds: list[ArgKind],
context: Context,
original_type: Type | None = None,
) -> tuple[Type, Type]:
"""Type check a call to a named method on an object.
Return tuple (result type, inferred method type). The 'original_type'
is used for error messages.
"""
original_type = original_type or base_type
# Unions are special-cased to allow plugins to act on each element of the union.
base_type = get_proper_type(base_type)
if isinstance(base_type, UnionType):
return self.check_union_method_call_by_name(
method, base_type, args, arg_kinds, context, original_type
)
method_type = analyze_member_access(
method,
base_type,
context,
is_lvalue=False,
is_super=False,
is_operator=True,
msg=self.msg,
original_type=original_type,
self_type=base_type,
chk=self.chk,
in_literal_context=self.is_literal_context(),
)
return self.check_method_call(method, base_type, method_type, args, arg_kinds, context)
def check_union_method_call_by_name(
self,
method: str,
base_type: UnionType,
args: list[Expression],
arg_kinds: list[ArgKind],
context: Context,
original_type: Type | None = None,
) -> tuple[Type, Type]:
"""Type check a call to a named method on an object with union type.
This essentially checks the call using check_method_call_by_name() for each
union item and unions the result. We do this to allow plugins to act on
individual union items.
"""
res: list[Type] = []
meth_res: list[Type] = []
for typ in base_type.relevant_items():
# Format error messages consistently with
# mypy.checkmember.analyze_union_member_access().
with self.msg.disable_type_names():
item, meth_item = self.check_method_call_by_name(
method, typ, args, arg_kinds, context, original_type
)
res.append(item)
meth_res.append(meth_item)
return make_simplified_union(res), make_simplified_union(meth_res)
def check_method_call(
self,
method_name: str,
base_type: Type,
method_type: Type,
args: list[Expression],
arg_kinds: list[ArgKind],
context: Context,
) -> tuple[Type, Type]:
"""Type check a call to a method with the given name and type on an object.
Return tuple (result type, inferred method type).
"""
callable_name = self.method_fullname(base_type, method_name)
object_type = base_type if callable_name is not None else None
# Try to refine the method signature using plugin hooks before checking the call.
method_type = self.transform_callee_type(
callable_name, method_type, args, arg_kinds, context, object_type=object_type
)
return self.check_call(
method_type,
args,
arg_kinds,
context,
callable_name=callable_name,
object_type=base_type,
)
def check_op_reversible(
self,
op_name: str,
left_type: Type,
left_expr: Expression,
right_type: Type,
right_expr: Expression,
context: Context,
) -> tuple[Type, Type]:
def lookup_operator(op_name: str, base_type: Type) -> Type | None:
"""Looks up the given operator and returns the corresponding type,
if it exists."""
# This check is an important performance optimization,
# even though it is mostly a subset of
# analyze_member_access.
# TODO: Find a way to remove this call without performance implications.
if not self.has_member(base_type, op_name):
return None
with self.msg.filter_errors() as w:
member = analyze_member_access(
name=op_name,
typ=base_type,
is_lvalue=False,
is_super=False,
is_operator=True,
original_type=base_type,
context=context,
msg=self.msg,
chk=self.chk,
in_literal_context=self.is_literal_context(),
)
return None if w.has_new_errors() else member
def lookup_definer(typ: Instance, attr_name: str) -> str | None:
"""Returns the name of the class that contains the actual definition of attr_name.
            So if class A defines foo and class B subclasses A, running
            'lookup_definer(B, "foo")' would return the full name of A.
However, if B were to override and redefine foo, that method call would
return the full name of B instead.
If the attr name is not present in the given class or its MRO, returns None.
"""
for cls in typ.type.mro:
if cls.names.get(attr_name):
return cls.fullname
return None
left_type = get_proper_type(left_type)
right_type = get_proper_type(right_type)
        # If either the LHS or the RHS is Any, we can't really conclude anything
        # about the operation, since the Any type may or may not define an
        # __op__ or __rop__ method. So we punt and return Any instead.
if isinstance(left_type, AnyType):
any_type = AnyType(TypeOfAny.from_another_any, source_any=left_type)
return any_type, any_type
if isinstance(right_type, AnyType):
any_type = AnyType(TypeOfAny.from_another_any, source_any=right_type)
return any_type, any_type
# STEP 1:
# We start by getting the __op__ and __rop__ methods, if they exist.
rev_op_name = operators.reverse_op_methods[op_name]
left_op = lookup_operator(op_name, left_type)
right_op = lookup_operator(rev_op_name, right_type)
# STEP 2a:
# We figure out in which order Python will call the operator methods. As it
# turns out, it's not as simple as just trying to call __op__ first and
# __rop__ second.
#
# We store the determined order inside the 'variants_raw' variable,
# which records tuples containing the method, base type, and the argument.
if op_name in operators.op_methods_that_shortcut and is_same_type(left_type, right_type):
# When we do "A() + A()", for example, Python will only call the __add__ method,
# never the __radd__ method.
#
# This is the case even if the __add__ method is completely missing and the __radd__
# method is defined.
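            # An informal illustration of the runtime rule mirrored here (the class below
            # is hypothetical and purely for exposition):
            #
            #     class A:
            #         def __radd__(self, other): return "radd"
            #
            #     A() + A()  # TypeError: __radd__ is never consulted for same-type operands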
variants_raw = [(left_op, left_type, right_expr)]
elif (
is_subtype(right_type, left_type)
and isinstance(left_type, Instance)
and isinstance(right_type, Instance)
and not (
left_type.type.alt_promote is not None
and left_type.type.alt_promote.type is right_type.type
)
and lookup_definer(left_type, op_name) != lookup_definer(right_type, rev_op_name)
):
# When we do "A() + B()" where B is a subclass of A, we'll actually try calling
# B's __radd__ method first, but ONLY if B explicitly defines or overrides the
# __radd__ method.
#
# This mechanism lets subclasses "refine" the expected outcome of the operation, even
# if they're located on the RHS.
#
# As a special case, the alt_promote check makes sure that we don't use the
# __radd__ method of int if the LHS is a native int type.
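            # An informal sketch of the corresponding runtime behaviour (hypothetical
            # classes, purely illustrative):
            #
            #     class A:
            #         def __add__(self, other): return "A.__add__"
            #     class B(A):
            #         def __radd__(self, other): return "B.__radd__"
            #
            #     A() + B()  # B.__radd__ is tried first, so this evaluates to "B.__radd__"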
variants_raw = [(right_op, right_type, left_expr), (left_op, left_type, right_expr)]
else:
# In all other cases, we do the usual thing and call __add__ first and
# __radd__ second when doing "A() + B()".
variants_raw = [(left_op, left_type, right_expr), (right_op, right_type, left_expr)]
# STEP 3:
# We now filter out all non-existent operators. The 'variants' list contains
# all operator methods that are actually present, in the order that Python
# attempts to invoke them.
variants = [(op, obj, arg) for (op, obj, arg) in variants_raw if op is not None]
# STEP 4:
# We now try invoking each one. If an operation succeeds, end early and return
# the corresponding result. Otherwise, return the result and errors associated
# with the first entry.
errors = []
results = []
for method, obj, arg in variants:
with self.msg.filter_errors(save_filtered_errors=True) as local_errors:
result = self.check_method_call(op_name, obj, method, [arg], [ARG_POS], context)
if local_errors.has_new_errors():
errors.append(local_errors.filtered_errors())
results.append(result)
else:
return result
        # We finished invoking the operators above without an early return. Therefore,
        # we check whether either the LHS or the RHS is an Instance that falls back to
        # Any; if so, we also return Any.
if (isinstance(left_type, Instance) and left_type.type.fallback_to_any) or (
isinstance(right_type, Instance) and right_type.type.fallback_to_any
):
any_type = AnyType(TypeOfAny.special_form)
return any_type, any_type
# STEP 4b:
        # Sometimes, the variants list is empty. In that case, we fall back to attempting to
# call the __op__ method (even though it's missing).
if not variants:
with self.msg.filter_errors(save_filtered_errors=True) as local_errors:
result = self.check_method_call_by_name(
op_name, left_type, [right_expr], [ARG_POS], context
)
if local_errors.has_new_errors():
errors.append(local_errors.filtered_errors())
results.append(result)
else:
# In theory, we should never enter this case, but it seems
# we sometimes do, when dealing with Type[...]? E.g. see
# check-classes.testTypeTypeComparisonWorks.
#
# This is probably related to the TODO in lookup_operator(...)
# up above.
#
# TODO: Remove this extra case
return result
self.msg.add_errors(errors[0])
if len(results) == 1:
return results[0]
else:
error_any = AnyType(TypeOfAny.from_error)
result = error_any, error_any
return result
def check_op(
self,
method: str,
base_type: Type,
arg: Expression,
context: Context,
allow_reverse: bool = False,
) -> tuple[Type, Type]:
"""Type check a binary operation which maps to a method call.
Return tuple (result type, inferred operator method type).
"""
if allow_reverse:
left_variants = [base_type]
base_type = get_proper_type(base_type)
if isinstance(base_type, UnionType):
left_variants = list(flatten_nested_unions(base_type.relevant_items()))
right_type = self.accept(arg)
            # Step 1: We first try leaving the right arguments alone and destructuring
            # just the left ones. (Mypy can sometimes perform more precise inference
            # if we leave the right operands as a union -- see testOperatorWithEmptyListAndSum.)
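            # For instance (an informal sketch): for `x + y` with x: int | str, the left
            # operand is checked separately as int and as str, while y keeps its possibly
            # union-typed form; the per-variant results are then unioned below.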
all_results = []
all_inferred = []
with self.msg.filter_errors() as local_errors:
for left_possible_type in left_variants:
result, inferred = self.check_op_reversible(
op_name=method,
left_type=left_possible_type,
left_expr=TempNode(left_possible_type, context=context),
right_type=right_type,
right_expr=arg,
context=context,
)
all_results.append(result)
all_inferred.append(inferred)
if not local_errors.has_new_errors():
results_final = make_simplified_union(all_results)
inferred_final = make_simplified_union(all_inferred)
return results_final, inferred_final
# Step 2: If that fails, we try again but also destructure the right argument.
# This is also necessary to make certain edge cases work -- see
# testOperatorDoubleUnionInterwovenUnionAdd, for example.
# Note: We want to pass in the original 'arg' for 'left_expr' and 'right_expr'
# whenever possible so that plugins and similar things can introspect on the original
# node if possible.
#
# We don't do the same for the base expression because it could lead to weird
# type inference errors -- e.g. see 'testOperatorDoubleUnionSum'.
# TODO: Can we use `type_overrides_set()` here?
right_variants = [(right_type, arg)]
right_type = get_proper_type(right_type)
if isinstance(right_type, UnionType):
right_variants = [
(item, TempNode(item, context=context))
for item in flatten_nested_unions(right_type.relevant_items())
]
all_results = []
all_inferred = []
with self.msg.filter_errors(save_filtered_errors=True) as local_errors:
for left_possible_type in left_variants:
for right_possible_type, right_expr in right_variants:
result, inferred = self.check_op_reversible(
op_name=method,
left_type=left_possible_type,
left_expr=TempNode(left_possible_type, context=context),
right_type=right_possible_type,
right_expr=right_expr,
context=context,
)
all_results.append(result)
all_inferred.append(inferred)
if local_errors.has_new_errors():
self.msg.add_errors(local_errors.filtered_errors())
# Point any notes to the same location as an existing message.
err = local_errors.filtered_errors()[-1]
recent_context = TempNode(NoneType())
recent_context.line = err.line
recent_context.column = err.column
if len(left_variants) >= 2 and len(right_variants) >= 2:
self.msg.warn_both_operands_are_from_unions(recent_context)
elif len(left_variants) >= 2:
self.msg.warn_operand_was_from_union("Left", base_type, context=recent_context)
elif len(right_variants) >= 2:
self.msg.warn_operand_was_from_union(
"Right", right_type, context=recent_context
)
# See the comment in 'check_overload_call' for more details on why
            # we call 'combine_function_signatures' instead of just unioning the inferred
# callable types.
results_final = make_simplified_union(all_results)
inferred_final = self.combine_function_signatures(get_proper_types(all_inferred))
return results_final, inferred_final
else:
return self.check_method_call_by_name(
method=method,
base_type=base_type,
args=[arg],
arg_kinds=[ARG_POS],
context=context,
)
def check_boolean_op(self, e: OpExpr, context: Context) -> Type:
"""Type check a boolean operation ('and' or 'or')."""
# A boolean operation can evaluate to either of the operands.
        # We use the current type context to guide the type inference of
# the left operand. We also use the left operand type to guide the type
# inference of the right operand so that expressions such as
# '[1] or []' are inferred correctly.
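        # For example (an informal sketch): in `[1] or []`, the empty list on the right is
        # checked with `list[int]` as its context, so the whole expression is inferred as
        # `list[int]`.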
ctx = self.type_context[-1]
left_type = self.accept(e.left, ctx)
expanded_left_type = try_expanding_sum_type_to_union(
self.accept(e.left, ctx), "builtins.bool"
)
assert e.op in ("and", "or") # Checked by visit_op_expr
if e.right_always:
left_map: mypy.checker.TypeMap = None
right_map: mypy.checker.TypeMap = {}
elif e.right_unreachable:
left_map, right_map = {}, None
elif e.op == "and":
right_map, left_map = self.chk.find_isinstance_check(e.left)
elif e.op == "or":
left_map, right_map = self.chk.find_isinstance_check(e.left)
# If left_map is None then we know mypy considers the left expression
# to be redundant.
if (
codes.REDUNDANT_EXPR in self.chk.options.enabled_error_codes
and left_map is None
# don't report an error if it's intentional
and not e.right_always
):
self.msg.redundant_left_operand(e.op, e.left)
if (
self.chk.should_report_unreachable_issues()
and right_map is None
# don't report an error if it's intentional
and not e.right_unreachable
):
self.msg.unreachable_right_operand(e.op, e.right)
# If right_map is None then we know mypy considers the right branch
# to be unreachable and therefore any errors found in the right branch
# should be suppressed.
with self.msg.filter_errors(filter_errors=right_map is None):
right_type = self.analyze_cond_branch(right_map, e.right, expanded_left_type)
if left_map is None and right_map is None:
return UninhabitedType()
if right_map is None:
# The boolean expression is statically known to be the left value
assert left_map is not None
return left_type
if left_map is None:
# The boolean expression is statically known to be the right value
assert right_map is not None
return right_type
if e.op == "and":
restricted_left_type = false_only(expanded_left_type)
result_is_left = not expanded_left_type.can_be_true
elif e.op == "or":
restricted_left_type = true_only(expanded_left_type)
result_is_left = not expanded_left_type.can_be_false
if isinstance(restricted_left_type, UninhabitedType):
# The left operand can never be the result
return right_type
elif result_is_left:
# The left operand is always the result
return left_type
else:
return make_simplified_union([restricted_left_type, right_type])
def check_list_multiply(self, e: OpExpr) -> Type:
"""Type check an expression of form '[...] * e'.
Type inference is special-cased for this common construct.
"""
right_type = self.accept(e.right)
if is_subtype(right_type, self.named_type("builtins.int")):
# Special case: [...] * <int value>. Use the type context of the
# OpExpr, since the multiplication does not affect the type.
left_type = self.accept(e.left, type_context=self.type_context[-1])
else:
left_type = self.accept(e.left)
result, method_type = self.check_op("__mul__", left_type, e.right, e)
e.method_type = method_type
return result
def visit_assignment_expr(self, e: AssignmentExpr) -> Type:
value = self.accept(e.value)
self.chk.check_assignment(e.target, e.value)
self.chk.check_final(e)
if not has_uninhabited_component(value):
# TODO: can we get rid of this extra store_type()?
# Usually, check_assignment() already stores the lvalue type correctly.
self.chk.store_type(e.target, value)
self.find_partial_type_ref_fast_path(e.target)
return value
def visit_unary_expr(self, e: UnaryExpr) -> Type:
"""Type check an unary operation ('not', '-', '+' or '~')."""
operand_type = self.accept(e.expr)
op = e.op
if op == "not":
result: Type = self.bool_type()
self.chk.check_for_truthy_type(operand_type, e.expr)
else:
method = operators.unary_op_methods[op]
result, method_type = self.check_method_call_by_name(method, operand_type, [], [], e)
e.method_type = method_type
return result
def visit_index_expr(self, e: IndexExpr) -> Type:
"""Type check an index expression (base[index]).
It may also represent type application.
"""
result = self.visit_index_expr_helper(e)
result = self.narrow_type_from_binder(e, result)
p_result = get_proper_type(result)
if (
self.is_literal_context()
and isinstance(p_result, Instance)
and p_result.last_known_value is not None
):
result = p_result.last_known_value
return result
def visit_index_expr_helper(self, e: IndexExpr) -> Type:
if e.analyzed:
# It's actually a type application.
return self.accept(e.analyzed)
left_type = self.accept(e.base)
return self.visit_index_with_type(left_type, e)
def visit_index_with_type(
self, left_type: Type, e: IndexExpr, original_type: ProperType | None = None
) -> Type:
"""Analyze type of an index expression for a given type of base expression.
The 'original_type' is used for error messages (currently used for union types).
"""
index = e.index
left_type = get_proper_type(left_type)
# Visit the index, just to make sure we have a type for it available
self.accept(index)
if isinstance(left_type, TupleType) and any(
isinstance(it, UnpackType) for it in left_type.items
):
# Normalize variadic tuples for consistency.
left_type = expand_type(left_type, {})
if isinstance(left_type, UnionType):
original_type = original_type or left_type
# Don't combine literal types, since we may need them for type narrowing.
return make_simplified_union(
[
self.visit_index_with_type(typ, e, original_type)
for typ in left_type.relevant_items()
],
contract_literals=False,
)
elif isinstance(left_type, TupleType) and self.chk.in_checked_function():
# Special case for tuples. They return a more specific type when
# indexed by an integer literal.
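            # For example (informal): given t: tuple[int, str], t[0] is int and t[1] is str,
            # while t[i] with a non-literal int index falls back to the union int | str.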
if isinstance(index, SliceExpr):
return self.visit_tuple_slice_helper(left_type, index)
ns = self.try_getting_int_literals(index)
if ns is not None:
out = []
for n in ns:
item = self.visit_tuple_index_helper(left_type, n)
if item is not None:
out.append(item)
else:
self.chk.fail(message_registry.TUPLE_INDEX_OUT_OF_RANGE, e)
if any(isinstance(t, UnpackType) for t in left_type.items):
min_len = self.min_tuple_length(left_type)
self.chk.note(f"Variadic tuple can have length {min_len}", e)
return AnyType(TypeOfAny.from_error)
return make_simplified_union(out)
else:
return self.nonliteral_tuple_index_helper(left_type, index)
elif isinstance(left_type, TypedDictType):
return self.visit_typeddict_index_expr(left_type, e.index)[0]
elif isinstance(left_type, FunctionLike) and left_type.is_type_obj():
if left_type.type_object().is_enum:
return self.visit_enum_index_expr(left_type.type_object(), e.index, e)
elif self.chk.options.python_version >= (3, 9) and (
left_type.type_object().type_vars
or left_type.type_object().fullname == "builtins.type"
):
return self.named_type("types.GenericAlias")
if isinstance(left_type, TypeVarType) and not self.has_member(
left_type.upper_bound, "__getitem__"
):
return self.visit_index_with_type(left_type.upper_bound, e, original_type)
elif isinstance(left_type, Instance) and left_type.type.fullname == "typing._SpecialForm":
# Allow special forms to be indexed and used to create union types
return self.named_type("typing._SpecialForm")
else:
result, method_type = self.check_method_call_by_name(
"__getitem__", left_type, [e.index], [ARG_POS], e, original_type=original_type
)
e.method_type = method_type
return result
def min_tuple_length(self, left: TupleType) -> int:
unpack_index = find_unpack_in_list(left.items)
if unpack_index is None:
return left.length()
unpack = left.items[unpack_index]
assert isinstance(unpack, UnpackType)
if isinstance(unpack.type, TypeVarTupleType):
return left.length() - 1 + unpack.type.min_len
return left.length() - 1
def visit_tuple_index_helper(self, left: TupleType, n: int) -> Type | None:
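        # An informal sketch of the results for a variadic tuple (types are illustrative only):
        #
        #     t: tuple[int, *tuple[str, ...], bool]
        #     t[0]   -> int
        #     t[1]   -> str | bool   (the variadic part may be empty)
        #     t[-1]  -> bool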
unpack_index = find_unpack_in_list(left.items)
if unpack_index is None:
if n < 0:
n += len(left.items)
if 0 <= n < len(left.items):
return left.items[n]
return None
unpack = left.items[unpack_index]
assert isinstance(unpack, UnpackType)
unpacked = get_proper_type(unpack.type)
if isinstance(unpacked, TypeVarTupleType):
            # Usually we say that a TypeVarTuple can't be split, but in the case of
            # indexing it seems benign to just return the upper bound item, similar
            # to what we do when indexing a regular TypeVar.
bound = get_proper_type(unpacked.upper_bound)
assert isinstance(bound, Instance)
assert bound.type.fullname == "builtins.tuple"
middle = bound.args[0]
else:
assert isinstance(unpacked, Instance)
assert unpacked.type.fullname == "builtins.tuple"
middle = unpacked.args[0]
extra_items = self.min_tuple_length(left) - left.length() + 1
if n >= 0:
if n >= self.min_tuple_length(left):
                # For tuple[int, *tuple[str, ...], int] we allow either index 0 or 1,
                # since the variadic item may have zero items.
return None
if n < unpack_index:
return left.items[n]
return UnionType.make_union(
[middle]
+ left.items[unpack_index + 1 : max(n - extra_items + 2, unpack_index + 1)],
left.line,
left.column,
)
n += self.min_tuple_length(left)
if n < 0:
# Similar to above, we only allow -1, and -2 for tuple[int, *tuple[str, ...], int]
return None
if n >= unpack_index + extra_items:
return left.items[n - extra_items + 1]
return UnionType.make_union(
left.items[min(n, unpack_index) : unpack_index] + [middle], left.line, left.column
)
def visit_tuple_slice_helper(self, left_type: TupleType, slic: SliceExpr) -> Type:
begin: Sequence[int | None] = [None]
end: Sequence[int | None] = [None]
stride: Sequence[int | None] = [None]
if slic.begin_index:
begin_raw = self.try_getting_int_literals(slic.begin_index)
if begin_raw is None:
return self.nonliteral_tuple_index_helper(left_type, slic)
begin = begin_raw
if slic.end_index:
end_raw = self.try_getting_int_literals(slic.end_index)
if end_raw is None:
return self.nonliteral_tuple_index_helper(left_type, slic)
end = end_raw
if slic.stride:
stride_raw = self.try_getting_int_literals(slic.stride)
if stride_raw is None:
return self.nonliteral_tuple_index_helper(left_type, slic)
stride = stride_raw
items: list[Type] = []
for b, e, s in itertools.product(begin, end, stride):
item = left_type.slice(b, e, s, fallback=self.named_type("builtins.tuple"))
if item is None:
self.chk.fail(message_registry.AMBIGUOUS_SLICE_OF_VARIADIC_TUPLE, slic)
return AnyType(TypeOfAny.from_error)
items.append(item)
return make_simplified_union(items)
def try_getting_int_literals(self, index: Expression) -> list[int] | None:
"""If the given expression or type corresponds to an int literal
or a union of int literals, returns a list of the underlying ints.
Otherwise, returns None.
Specifically, this function is guaranteed to return a list with
        one or more ints if one of the following is true:
        1. 'expr' is an IntExpr or a UnaryExpr backed by an IntExpr
2. 'typ' is a LiteralType containing an int
3. 'typ' is a UnionType containing only LiteralType of ints
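        For example (an informal illustration): an index expression `-3` yields [-3],
        an index typed as Literal[0, 1] yields [0, 1], and a plain `int` index yields None.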
"""
if isinstance(index, IntExpr):
return [index.value]
elif isinstance(index, UnaryExpr):
if index.op == "-":
operand = index.expr
if isinstance(operand, IntExpr):
return [-1 * operand.value]
if index.op == "+":
operand = index.expr
if isinstance(operand, IntExpr):
return [operand.value]
typ = get_proper_type(self.accept(index))
if isinstance(typ, Instance) and typ.last_known_value is not None:
typ = typ.last_known_value
if isinstance(typ, LiteralType) and isinstance(typ.value, int):
return [typ.value]
if isinstance(typ, UnionType):
out = []
for item in get_proper_types(typ.items):
if isinstance(item, LiteralType) and isinstance(item.value, int):
out.append(item.value)
else:
return None
return out
return None
def nonliteral_tuple_index_helper(self, left_type: TupleType, index: Expression) -> Type:
self.check_method_call_by_name("__getitem__", left_type, [index], [ARG_POS], context=index)
# We could return the return type from above, but unions are often better than the join
union = self.union_tuple_fallback_item(left_type)
if isinstance(index, SliceExpr):
return self.chk.named_generic_type("builtins.tuple", [union])
return union
def union_tuple_fallback_item(self, left_type: TupleType) -> Type:
# TODO: this duplicates logic in typeops.tuple_fallback().
items = []
for item in left_type.items:
if isinstance(item, UnpackType):
unpacked_type = get_proper_type(item.type)
if isinstance(unpacked_type, TypeVarTupleType):
unpacked_type = get_proper_type(unpacked_type.upper_bound)
if (
isinstance(unpacked_type, Instance)
and unpacked_type.type.fullname == "builtins.tuple"
):
items.append(unpacked_type.args[0])
else:
raise NotImplementedError
else:
items.append(item)
return make_simplified_union(items)
def visit_typeddict_index_expr(
self, td_type: TypedDictType, index: Expression, setitem: bool = False
) -> tuple[Type, set[str]]:
if isinstance(index, StrExpr):
key_names = [index.value]
else:
typ = get_proper_type(self.accept(index))
if isinstance(typ, UnionType):
key_types: list[Type] = list(typ.items)
else:
key_types = [typ]
key_names = []
for key_type in get_proper_types(key_types):
if isinstance(key_type, Instance) and key_type.last_known_value is not None:
key_type = key_type.last_known_value
if (
isinstance(key_type, LiteralType)
and isinstance(key_type.value, str)
and key_type.fallback.type.fullname != "builtins.bytes"
):
key_names.append(key_type.value)
else:
self.msg.typeddict_key_must_be_string_literal(td_type, index)
return AnyType(TypeOfAny.from_error), set()
value_types = []
for key_name in key_names:
value_type = td_type.items.get(key_name)
if value_type is None:
self.msg.typeddict_key_not_found(td_type, key_name, index, setitem)
return AnyType(TypeOfAny.from_error), set()
else:
value_types.append(value_type)
return make_simplified_union(value_types), set(key_names)
def visit_enum_index_expr(
self, enum_type: TypeInfo, index: Expression, context: Context
) -> Type:
string_type: Type = self.named_type("builtins.str")
self.chk.check_subtype(
self.accept(index),
string_type,
context,
"Enum index should be a string",
"actual index type",
)
return Instance(enum_type, [])
def visit_cast_expr(self, expr: CastExpr) -> Type:
"""Type check a cast expression."""
source_type = self.accept(
expr.expr,
type_context=AnyType(TypeOfAny.special_form),
allow_none_return=True,
always_allow_any=True,
)
target_type = expr.type
options = self.chk.options
if (
options.warn_redundant_casts
and not isinstance(get_proper_type(target_type), AnyType)
and source_type == target_type
):
self.msg.redundant_cast(target_type, expr)
if options.disallow_any_unimported and has_any_from_unimported_type(target_type):
self.msg.unimported_type_becomes_any("Target type of cast", target_type, expr)
check_for_explicit_any(
target_type, self.chk.options, self.chk.is_typeshed_stub, self.msg, context=expr
)
return target_type
def visit_assert_type_expr(self, expr: AssertTypeExpr) -> Type:
source_type = self.accept(
expr.expr,
type_context=self.type_context[-1],
allow_none_return=True,
always_allow_any=True,
)
if self.chk.current_node_deferred:
return source_type
target_type = expr.type
proper_source_type = get_proper_type(source_type)
if (
isinstance(proper_source_type, mypy.types.Instance)
and proper_source_type.last_known_value is not None
):
source_type = proper_source_type.last_known_value
if not is_same_type(source_type, target_type):
if not self.chk.in_checked_function():
self.msg.note(
'"assert_type" expects everything to be "Any" in unchecked functions',
expr.expr,
)
self.msg.assert_type_fail(source_type, target_type, expr)
return source_type
def visit_reveal_expr(self, expr: RevealExpr) -> Type:
"""Type check a reveal_type expression."""
if expr.kind == REVEAL_TYPE:
assert expr.expr is not None
revealed_type = self.accept(
expr.expr, type_context=self.type_context[-1], allow_none_return=True
)
if not self.chk.current_node_deferred:
self.msg.reveal_type(revealed_type, expr.expr)
if not self.chk.in_checked_function():
self.msg.note(
"'reveal_type' always outputs 'Any' in unchecked functions", expr.expr
)
self.check_reveal_imported(expr)
return revealed_type
else:
# REVEAL_LOCALS
if not self.chk.current_node_deferred:
# the RevealExpr contains a local_nodes attribute,
# calculated at semantic analysis time. Use it to pull out the
# corresponding subset of variables in self.chk.type_map
names_to_types = (
{var_node.name: var_node.type for var_node in expr.local_nodes}
if expr.local_nodes is not None
else {}
)
self.msg.reveal_locals(names_to_types, expr)
self.check_reveal_imported(expr)
return NoneType()
def check_reveal_imported(self, expr: RevealExpr) -> None:
if codes.UNIMPORTED_REVEAL not in self.chk.options.enabled_error_codes:
return
name = ""
if expr.kind == REVEAL_LOCALS:
name = "reveal_locals"
elif expr.kind == REVEAL_TYPE and not expr.is_imported:
name = "reveal_type"
else:
return
self.chk.fail(f'Name "{name}" is not defined', expr, code=codes.UNIMPORTED_REVEAL)
if name == "reveal_type":
module = (
"typing" if self.chk.options.python_version >= (3, 11) else "typing_extensions"
)
hint = (
'Did you forget to import it from "{module}"?'
' (Suggestion: "from {module} import {name}")'
).format(module=module, name=name)
self.chk.note(hint, expr, code=codes.UNIMPORTED_REVEAL)
def visit_type_application(self, tapp: TypeApplication) -> Type:
"""Type check a type application (expr[type, ...]).
There are two different options here, depending on whether expr refers
to a type alias or directly to a generic class. In the first case we need
to use a dedicated function typeanal.instantiate_type_alias(). This
is due to slight differences in how type arguments are applied and checked.
"""
if isinstance(tapp.expr, RefExpr) and isinstance(tapp.expr.node, TypeAlias):
if tapp.expr.node.python_3_12_type_alias:
return self.type_alias_type_type()
# Subscription of a (generic) alias in runtime context, expand the alias.
item = instantiate_type_alias(
tapp.expr.node,
tapp.types,
self.chk.fail,
tapp.expr.node.no_args,
tapp,
self.chk.options,
)
item = get_proper_type(item)
if isinstance(item, Instance):
tp = type_object_type(item.type, self.named_type)
return self.apply_type_arguments_to_callable(tp, item.args, tapp)
elif isinstance(item, TupleType) and item.partial_fallback.type.is_named_tuple:
tp = type_object_type(item.partial_fallback.type, self.named_type)
return self.apply_type_arguments_to_callable(tp, item.partial_fallback.args, tapp)
elif isinstance(item, TypedDictType):
return self.typeddict_callable_from_context(item)
else:
self.chk.fail(message_registry.ONLY_CLASS_APPLICATION, tapp)
return AnyType(TypeOfAny.from_error)
# Type application of a normal generic class in runtime context.
# This is typically used as `x = G[int]()`.
tp = get_proper_type(self.accept(tapp.expr))
if isinstance(tp, (CallableType, Overloaded)):
if not tp.is_type_obj():
self.chk.fail(message_registry.ONLY_CLASS_APPLICATION, tapp)
return self.apply_type_arguments_to_callable(tp, tapp.types, tapp)
if isinstance(tp, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=tp)
return AnyType(TypeOfAny.special_form)
def visit_type_alias_expr(self, alias: TypeAliasExpr) -> Type:
"""Right hand side of a type alias definition.
It has the same type as if the alias itself was used in a runtime context.
For example, here:
A = reveal_type(List[T])
reveal_type(A)
both `reveal_type` instances will reveal the same type `def (...) -> builtins.list[Any]`.
Note that type variables are implicitly substituted with `Any`.
"""
return self.alias_type_in_runtime_context(alias.node, ctx=alias, alias_definition=True)
def alias_type_in_runtime_context(
self, alias: TypeAlias, *, ctx: Context, alias_definition: bool = False
) -> Type:
"""Get type of a type alias (could be generic) in a runtime expression.
Note that this function can be called only if the alias appears _not_
as a target of type application, which is treated separately in the
visit_type_application method. Some examples where this method is called are
casts and instantiation:
class LongName(Generic[T]): ...
A = LongName[int]
x = A()
y = cast(A, ...)
"""
if alias.python_3_12_type_alias:
return self.type_alias_type_type()
if isinstance(alias.target, Instance) and alias.target.invalid: # type: ignore[misc]
# An invalid alias, error already has been reported
return AnyType(TypeOfAny.from_error)
# If this is a generic alias, we set all variables to `Any`.
# For example:
# A = List[Tuple[T, T]]
# x = A() <- same as List[Tuple[Any, Any]], see PEP 484.
disallow_any = self.chk.options.disallow_any_generics and self.is_callee
item = get_proper_type(
set_any_tvars(
alias,
[],
ctx.line,
ctx.column,
self.chk.options,
disallow_any=disallow_any,
fail=self.msg.fail,
)
)
if isinstance(item, Instance):
# Normally we get a callable type (or overloaded) with .is_type_obj() true
# representing the class's constructor
tp = type_object_type(item.type, self.named_type)
if alias.no_args:
return tp
return self.apply_type_arguments_to_callable(tp, item.args, ctx)
elif (
isinstance(item, TupleType)
and
# Tuple[str, int]() fails at runtime, only named tuples and subclasses work.
tuple_fallback(item).type.fullname != "builtins.tuple"
):
return type_object_type(tuple_fallback(item).type, self.named_type)
elif isinstance(item, TypedDictType):
return self.typeddict_callable_from_context(item)
elif isinstance(item, NoneType):
return TypeType(item, line=item.line, column=item.column)
elif isinstance(item, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=item)
elif (
isinstance(item, UnionType)
and item.uses_pep604_syntax
and self.chk.options.python_version >= (3, 10)
):
return self.chk.named_generic_type("types.UnionType", item.items)
else:
if alias_definition:
return AnyType(TypeOfAny.special_form)
# The _SpecialForm type can be used in some runtime contexts (e.g. it may have __or__).
return self.named_type("typing._SpecialForm")
def split_for_callable(
self, t: CallableType, args: Sequence[Type], ctx: Context
) -> list[Type]:
"""Handle directly applying type arguments to a variadic Callable.
This is needed in situations where e.g. variadic class object appears in
runtime context. For example:
class C(Generic[T, Unpack[Ts]]): ...
x = C[int, str]()
We simply group the arguments that need to go into Ts variable into a TupleType,
similar to how it is done in other places using split_with_prefix_and_suffix().
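        For the example above this would, roughly, turn the arguments [int, str] into
        [int, tuple[str]], with the trailing group bound to Ts.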
"""
if t.is_type_obj():
# Type arguments must map to class type variables, ignoring constructor vars.
vars = t.type_object().defn.type_vars
else:
vars = list(t.variables)
args = flatten_nested_tuples(args)
# TODO: this logic is duplicated with semanal_typeargs.
for tv, arg in zip(t.variables, args):
if isinstance(tv, ParamSpecType):
if not isinstance(
get_proper_type(arg), (Parameters, ParamSpecType, AnyType, UnboundType)
):
self.chk.fail(
"Can only replace ParamSpec with a parameter types list or"
f" another ParamSpec, got {format_type(arg, self.chk.options)}",
ctx,
)
return [AnyType(TypeOfAny.from_error)] * len(vars)
if not vars or not any(isinstance(v, TypeVarTupleType) for v in vars):
return list(args)
# TODO: in future we may want to support type application to variadic functions.
assert t.is_type_obj()
info = t.type_object()
# We reuse the logic from semanal phase to reduce code duplication.
fake = Instance(info, args, line=ctx.line, column=ctx.column)
        # This code can only be called either from checking a type application, or from
# checking a type alias (after the caller handles no_args aliases), so we know it
# was initially an IndexExpr, and we allow empty tuple type arguments.
if not validate_instance(fake, self.chk.fail, empty_tuple_index=True):
fix_instance(
fake, self.chk.fail, self.chk.note, disallow_any=False, options=self.chk.options
)
args = list(fake.args)
prefix = next(i for (i, v) in enumerate(vars) if isinstance(v, TypeVarTupleType))
suffix = len(vars) - prefix - 1
tvt = vars[prefix]
assert isinstance(tvt, TypeVarTupleType)
start, middle, end = split_with_prefix_and_suffix(tuple(args), prefix, suffix)
return list(start) + [TupleType(list(middle), tvt.tuple_fallback)] + list(end)
def apply_type_arguments_to_callable(
self, tp: Type, args: Sequence[Type], ctx: Context
) -> Type:
"""Apply type arguments to a generic callable type coming from a type object.
This will first perform type arguments count checks, report the
error as needed, and return the correct kind of Any. As a special
case this returns Any for non-callable types, because if type object type
is not callable, then an error should be already reported.
"""
tp = get_proper_type(tp)
if isinstance(tp, CallableType):
if tp.is_type_obj():
# If we have a class object in runtime context, then the available type
# variables are those of the class, we don't include additional variables
# of the constructor. So that with
# class C(Generic[T]):
# def __init__(self, f: Callable[[S], T], x: S) -> None
# C[int] is valid
# C[int, str] is invalid (although C as a callable has 2 type variables)
# Note: various logic below and in applytype.py relies on the fact that
# class type variables appear *before* constructor variables.
type_vars = tp.type_object().defn.type_vars
else:
type_vars = list(tp.variables)
min_arg_count = sum(not v.has_default() for v in type_vars)
has_type_var_tuple = any(isinstance(v, TypeVarTupleType) for v in type_vars)
if (
len(args) < min_arg_count or len(args) > len(type_vars)
) and not has_type_var_tuple:
if tp.is_type_obj() and tp.type_object().fullname == "builtins.tuple":
# e.g. expression tuple[X, Y]
# - want the type of the expression i.e. a function with that as its return type
# - tp is type of tuple (note it won't have params as we are only called
# with generic callable type)
# - tuple[X, Y]() takes a single arg that is a tuple containing an X and a Y
return CallableType(
[TupleType(list(args), self.chk.named_type("tuple"))],
[ARG_POS],
[None],
TupleType(list(args), self.chk.named_type("tuple")),
tp.fallback,
name="tuple",
definition=tp.definition,
bound_args=tp.bound_args,
)
self.msg.incompatible_type_application(
min_arg_count, len(type_vars), len(args), ctx
)
return AnyType(TypeOfAny.from_error)
return self.apply_generic_arguments(tp, self.split_for_callable(tp, args, ctx), ctx)
if isinstance(tp, Overloaded):
for it in tp.items:
if tp.is_type_obj():
# Same as above.
type_vars = tp.type_object().defn.type_vars
else:
type_vars = list(it.variables)
min_arg_count = sum(not v.has_default() for v in type_vars)
has_type_var_tuple = any(isinstance(v, TypeVarTupleType) for v in type_vars)
if (
len(args) < min_arg_count or len(args) > len(type_vars)
) and not has_type_var_tuple:
self.msg.incompatible_type_application(
min_arg_count, len(type_vars), len(args), ctx
)
return AnyType(TypeOfAny.from_error)
return Overloaded(
[
self.apply_generic_arguments(it, self.split_for_callable(it, args, ctx), ctx)
for it in tp.items
]
)
return AnyType(TypeOfAny.special_form)
def visit_list_expr(self, e: ListExpr) -> Type:
"""Type check a list expression [...]."""
return self.check_lst_expr(e, "builtins.list", "<list>")
def visit_set_expr(self, e: SetExpr) -> Type:
return self.check_lst_expr(e, "builtins.set", "<set>")
def fast_container_type(
self, e: ListExpr | SetExpr | TupleExpr, container_fullname: str
) -> Type | None:
"""
Fast path to determine the type of a list or set literal,
based on the list of entries. This mostly impacts large
module-level constant definitions.
Limitations:
- no active type context
- no star expressions
- the joined type of all entries must be an Instance or Tuple type
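        For example (informally), a module-level literal such as [1, 2, 3] with no
        surrounding type context can be typed as list[int] without a full generic call check.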
"""
ctx = self.type_context[-1]
if ctx:
return None
rt = self.resolved_type.get(e, None)
if rt is not None:
return rt if isinstance(rt, Instance) else None
values: list[Type] = []
for item in e.items:
if isinstance(item, StarExpr):
# fallback to slow path
self.resolved_type[e] = NoneType()
return None
values.append(self.accept(item))
vt = join.join_type_list(values)
if not allow_fast_container_literal(vt):
self.resolved_type[e] = NoneType()
return None
ct = self.chk.named_generic_type(container_fullname, [vt])
self.resolved_type[e] = ct
return ct
def check_lst_expr(self, e: ListExpr | SetExpr | TupleExpr, fullname: str, tag: str) -> Type:
# fast path
t = self.fast_container_type(e, fullname)
if t:
return t
# Translate into type checking a generic function call.
# Used for list and set expressions, as well as for tuples
# containing star expressions that don't refer to a
# Tuple. (Note: "lst" stands for list-set-tuple. :-)
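        # Roughly, the synthetic callable constructed below behaves like (an informal sketch):
        #
        #     def <list>(*v: T) -> list[T]: ...
        #
        # so that e.g. [1, 2] is checked as if it were <list>(1, 2), inferring list[int].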
tv = TypeVarType(
"T",
"T",
id=TypeVarId(-1, namespace="<lst>"),
values=[],
upper_bound=self.object_type(),
default=AnyType(TypeOfAny.from_omitted_generics),
)
constructor = CallableType(
[tv],
[nodes.ARG_STAR],
[None],
self.chk.named_generic_type(fullname, [tv]),
self.named_type("builtins.function"),
name=tag,
variables=[tv],
)
out = self.check_call(
constructor,
[(i.expr if isinstance(i, StarExpr) else i) for i in e.items],
[(nodes.ARG_STAR if isinstance(i, StarExpr) else nodes.ARG_POS) for i in e.items],
e,
)[0]
return remove_instance_last_known_values(out)
def tuple_context_matches(self, expr: TupleExpr, ctx: TupleType) -> bool:
ctx_unpack_index = find_unpack_in_list(ctx.items)
if ctx_unpack_index is None:
# For fixed tuples accept everything that can possibly match, even if this
# requires all star items to be empty.
return len([e for e in expr.items if not isinstance(e, StarExpr)]) <= len(ctx.items)
# For variadic context, the only easy case is when structure matches exactly.
# TODO: try using tuple type context in more cases.
if len([e for e in expr.items if isinstance(e, StarExpr)]) != 1:
return False
expr_star_index = next(i for i, lv in enumerate(expr.items) if isinstance(lv, StarExpr))
return len(expr.items) == len(ctx.items) and ctx_unpack_index == expr_star_index
def visit_tuple_expr(self, e: TupleExpr) -> Type:
"""Type check a tuple expression."""
# Try to determine type context for type inference.
type_context = get_proper_type(self.type_context[-1])
type_context_items = None
if isinstance(type_context, UnionType):
tuples_in_context = [
t
for t in get_proper_types(type_context.items)
if (isinstance(t, TupleType) and self.tuple_context_matches(e, t))
or is_named_instance(t, TUPLE_LIKE_INSTANCE_NAMES)
]
if len(tuples_in_context) == 1:
type_context = tuples_in_context[0]
else:
# There are either no relevant tuples in the Union, or there is
# more than one. Either way, we can't decide on a context.
pass
if isinstance(type_context, TupleType) and self.tuple_context_matches(e, type_context):
type_context_items = type_context.items
elif type_context and is_named_instance(type_context, TUPLE_LIKE_INSTANCE_NAMES):
assert isinstance(type_context, Instance)
if type_context.args:
type_context_items = [type_context.args[0]] * len(e.items)
# NOTE: it's possible for the context to have a different
# number of items than e. In that case we use those context
# items that match a position in e, and we'll worry about type
# mismatches later.
unpack_in_context = False
if type_context_items is not None:
unpack_in_context = find_unpack_in_list(type_context_items) is not None
seen_unpack_in_items = False
allow_precise_tuples = (
unpack_in_context or PRECISE_TUPLE_TYPES in self.chk.options.enable_incomplete_feature
)
# Infer item types. Give up if there's a star expression
# that's not a Tuple.
items: list[Type] = []
j = 0 # Index into type_context_items; irrelevant if type_context_items is none
for i in range(len(e.items)):
item = e.items[i]
if isinstance(item, StarExpr):
# Special handling for star expressions.
# TODO: If there's a context, and item.expr is a
# TupleExpr, flatten it, so we can benefit from the
# context? Counterargument: Why would anyone write
# (1, *(2, 3)) instead of (1, 2, 3) except in a test?
if unpack_in_context:
# Note: this logic depends on full structure match in tuple_context_matches().
assert type_context_items
ctx_item = type_context_items[j]
assert isinstance(ctx_item, UnpackType)
ctx = ctx_item.type
else:
ctx = None
tt = self.accept(item.expr, ctx)
tt = get_proper_type(tt)
if isinstance(tt, TupleType):
if find_unpack_in_list(tt.items) is not None:
if seen_unpack_in_items:
# Multiple unpack items are not allowed in tuples,
# fall back to instance type.
return self.check_lst_expr(e, "builtins.tuple", "<tuple>")
else:
seen_unpack_in_items = True
items.extend(tt.items)
# Note: this logic depends on full structure match in tuple_context_matches().
if unpack_in_context:
j += 1
else:
# If there is an unpack in expressions, but not in context, this will
# result in an error later, just do something predictable here.
j += len(tt.items)
else:
if allow_precise_tuples and not seen_unpack_in_items:
# Handle (x, *y, z), where y is e.g. tuple[Y, ...].
if isinstance(tt, Instance) and self.chk.type_is_iterable(tt):
item_type = self.chk.iterable_item_type(tt, e)
mapped = self.chk.named_generic_type("builtins.tuple", [item_type])
items.append(UnpackType(mapped))
seen_unpack_in_items = True
continue
# A star expression that's not a Tuple.
# Treat the whole thing as a variable-length tuple.
return self.check_lst_expr(e, "builtins.tuple", "<tuple>")
else:
if not type_context_items or j >= len(type_context_items):
tt = self.accept(item)
else:
tt = self.accept(item, type_context_items[j])
j += 1
items.append(tt)
# This is a partial fallback item type. A precise type will be calculated on demand.
fallback_item = AnyType(TypeOfAny.special_form)
result: ProperType = TupleType(
items, self.chk.named_generic_type("builtins.tuple", [fallback_item])
)
if seen_unpack_in_items:
# Return already normalized tuple type just in case.
result = expand_type(result, {})
return result
def fast_dict_type(self, e: DictExpr) -> Type | None:
"""
Fast path to determine the type of a dict literal,
based on the list of entries. This mostly impacts large
module-level constant definitions.
Limitations:
- no active type context
- only supported star expressions are other dict instances
- the joined types of all keys and values must be Instance or Tuple types
"""
ctx = self.type_context[-1]
if ctx:
return None
rt = self.resolved_type.get(e, None)
if rt is not None:
return rt if isinstance(rt, Instance) else None
keys: list[Type] = []
values: list[Type] = []
stargs: tuple[Type, Type] | None = None
for key, value in e.items:
if key is None:
st = get_proper_type(self.accept(value))
if (
isinstance(st, Instance)
and st.type.fullname == "builtins.dict"
and len(st.args) == 2
):
stargs = (st.args[0], st.args[1])
else:
self.resolved_type[e] = NoneType()
return None
else:
keys.append(self.accept(key))
values.append(self.accept(value))
kt = join.join_type_list(keys)
vt = join.join_type_list(values)
if not (allow_fast_container_literal(kt) and allow_fast_container_literal(vt)):
self.resolved_type[e] = NoneType()
return None
if stargs and (stargs[0] != kt or stargs[1] != vt):
self.resolved_type[e] = NoneType()
return None
dt = self.chk.named_generic_type("builtins.dict", [kt, vt])
self.resolved_type[e] = dt
return dt
def check_typeddict_literal_in_context(
self, e: DictExpr, typeddict_context: TypedDictType
) -> Type:
orig_ret_type = self.check_typeddict_call_with_dict(
callee=typeddict_context, kwargs=e.items, context=e, orig_callee=None
)
ret_type = get_proper_type(orig_ret_type)
if isinstance(ret_type, TypedDictType):
return ret_type.copy_modified()
return typeddict_context.copy_modified()
def visit_dict_expr(self, e: DictExpr) -> Type:
"""Type check a dict expression.
Translate it into a call to dict(), with provisions for **expr.
"""
        # If the dict literal doesn't match a TypedDict, check_typeddict_call_with_dict reports
        # an error but returns the TypedDict type that best matches the literal it found.
        # That would cause a second error when that TypedDict type is returned upstream,
        # so to avoid the second error we always return the TypedDict type that was requested.
typeddict_contexts = self.find_typeddict_context(self.type_context[-1], e)
if typeddict_contexts:
if len(typeddict_contexts) == 1:
return self.check_typeddict_literal_in_context(e, typeddict_contexts[0])
# Multiple items union, check if at least one of them matches cleanly.
for typeddict_context in typeddict_contexts:
with self.msg.filter_errors() as err, self.chk.local_type_map() as tmap:
ret_type = self.check_typeddict_literal_in_context(e, typeddict_context)
if err.has_new_errors():
continue
self.chk.store_types(tmap)
return ret_type
# No item matched without an error, so we can't unambiguously choose the item.
self.msg.typeddict_context_ambiguous(typeddict_contexts, e)
# fast path attempt
dt = self.fast_dict_type(e)
if dt:
return dt
# Define type variables (used in constructors below).
kt = TypeVarType(
"KT",
"KT",
id=TypeVarId(-1, namespace="<dict>"),
values=[],
upper_bound=self.object_type(),
default=AnyType(TypeOfAny.from_omitted_generics),
)
vt = TypeVarType(
"VT",
"VT",
id=TypeVarId(-2, namespace="<dict>"),
values=[],
upper_bound=self.object_type(),
default=AnyType(TypeOfAny.from_omitted_generics),
)
# Collect function arguments, watching out for **expr.
args: list[Expression] = []
expected_types: list[Type] = []
for key, value in e.items:
if key is None:
args.append(value)
expected_types.append(
self.chk.named_generic_type("_typeshed.SupportsKeysAndGetItem", [kt, vt])
)
else:
tup = TupleExpr([key, value])
if key.line >= 0:
tup.line = key.line
tup.column = key.column
else:
tup.line = value.line
tup.column = value.column
tup.end_line = value.end_line
tup.end_column = value.end_column
args.append(tup)
expected_types.append(TupleType([kt, vt], self.named_type("builtins.tuple")))
# The callable type represents a function like this (except we adjust for **expr):
# def <dict>(*v: Tuple[kt, vt]) -> Dict[kt, vt]: ...
constructor = CallableType(
expected_types,
[nodes.ARG_POS] * len(expected_types),
[None] * len(expected_types),
self.chk.named_generic_type("builtins.dict", [kt, vt]),
self.named_type("builtins.function"),
name="<dict>",
variables=[kt, vt],
)
return self.check_call(constructor, args, [nodes.ARG_POS] * len(args), e)[0]
def find_typeddict_context(
self, context: Type | None, dict_expr: DictExpr
) -> list[TypedDictType]:
context = get_proper_type(context)
if isinstance(context, TypedDictType):
return [context]
elif isinstance(context, UnionType):
items = []
for item in context.items:
item_contexts = self.find_typeddict_context(item, dict_expr)
for item_context in item_contexts:
if self.match_typeddict_call_with_dict(
item_context, dict_expr.items, dict_expr
):
items.append(item_context)
return items
# No TypedDict type in context.
return []
def visit_lambda_expr(self, e: LambdaExpr) -> Type:
"""Type check lambda expression."""
self.chk.check_default_args(e, body_is_trivial=False)
inferred_type, type_override = self.infer_lambda_type_using_context(e)
if not inferred_type:
self.chk.return_types.append(AnyType(TypeOfAny.special_form))
# Type check everything in the body except for the final return
# statement (it can contain tuple unpacking before return).
with self.chk.binder.frame_context(
can_skip=True, fall_through=0
), self.chk.scope.push_function(e):
            # Lambdas can have more than one element in their body,
            # when we add "fictional" AssignmentStatement nodes, like in:
            # `lambda (a, b): a`
for stmt in e.body.body[:-1]:
stmt.accept(self.chk)
# Only type check the return expression, not the return statement.
# There's no useful type context.
ret_type = self.accept(e.expr(), allow_none_return=True)
fallback = self.named_type("builtins.function")
self.chk.return_types.pop()
return callable_type(e, fallback, ret_type)
else:
# Type context available.
self.chk.return_types.append(inferred_type.ret_type)
with self.chk.tscope.function_scope(e):
self.chk.check_func_item(e, type_override=type_override)
if not self.chk.has_type(e.expr()):
# TODO: return expression must be accepted before exiting function scope.
with self.chk.binder.frame_context(can_skip=True, fall_through=0):
self.accept(e.expr(), allow_none_return=True)
ret_type = self.chk.lookup_type(e.expr())
self.chk.return_types.pop()
return replace_callable_return_type(inferred_type, ret_type)
def infer_lambda_type_using_context(
self, e: LambdaExpr
) -> tuple[CallableType | None, CallableType | None]:
"""Try to infer lambda expression type using context.
Return None if could not infer type.
The second item in the return type is the type_override parameter for check_func_item.
"""
# TODO also accept 'Any' context
ctx = get_proper_type(self.type_context[-1])
if isinstance(ctx, UnionType):
callables = [
t for t in get_proper_types(ctx.relevant_items()) if isinstance(t, CallableType)
]
if len(callables) == 1:
ctx = callables[0]
if not ctx or not isinstance(ctx, CallableType):
return None, None
# The context may have function type variables in it. We replace them
# since these are the type variables we are ultimately trying to infer;
# they must be considered as indeterminate. We use ErasedType since it
# does not affect type inference results (it is for purposes like this
# only).
if not self.chk.options.old_type_inference:
# With new type inference we can preserve argument types even if they
# are generic, since new inference algorithm can handle constraints
# like S <: T (we still erase return type since it's ultimately unknown).
extra_vars = []
for arg in ctx.arg_types:
meta_vars = [tv for tv in get_all_type_vars(arg) if tv.id.is_meta_var()]
extra_vars.extend([tv for tv in meta_vars if tv not in extra_vars])
callable_ctx = ctx.copy_modified(
ret_type=replace_meta_vars(ctx.ret_type, ErasedType()),
variables=list(ctx.variables) + extra_vars,
)
else:
erased_ctx = replace_meta_vars(ctx, ErasedType())
assert isinstance(erased_ctx, ProperType) and isinstance(erased_ctx, CallableType)
callable_ctx = erased_ctx
# The callable_ctx may have a fallback of builtins.type if the context
# is a constructor -- but this fallback doesn't make sense for lambdas.
callable_ctx = callable_ctx.copy_modified(fallback=self.named_type("builtins.function"))
if callable_ctx.type_guard is not None or callable_ctx.type_is is not None:
# Lambda's return type cannot be treated as a `TypeGuard`,
# because it is implicit. And `TypeGuard`s must be explicit.
# See https://github.com/python/mypy/issues/9927
return None, None
arg_kinds = [arg.kind for arg in e.arguments]
if callable_ctx.is_ellipsis_args or ctx.param_spec() is not None:
# Fill in Any arguments to match the arguments of the lambda.
callable_ctx = callable_ctx.copy_modified(
is_ellipsis_args=False,
arg_types=[AnyType(TypeOfAny.special_form)] * len(arg_kinds),
arg_kinds=arg_kinds,
arg_names=e.arg_names.copy(),
)
if ARG_STAR in arg_kinds or ARG_STAR2 in arg_kinds:
# TODO treat this case appropriately
return callable_ctx, None
if callable_ctx.arg_kinds != arg_kinds:
# Incompatible context; cannot use it to infer types.
self.chk.fail(message_registry.CANNOT_INFER_LAMBDA_TYPE, e)
return None, None
# Type of lambda must have correct argument names, to prevent false
# negatives when lambdas appear in `ParamSpec` context.
return callable_ctx.copy_modified(arg_names=e.arg_names), callable_ctx
def visit_super_expr(self, e: SuperExpr) -> Type:
"""Type check a super expression (non-lvalue)."""
# We have an expression like super(T, var).member
# First compute the types of T and var
types = self._super_arg_types(e)
if isinstance(types, tuple):
type_type, instance_type = types
else:
return types
# Now get the MRO
type_info = type_info_from_type(type_type)
if type_info is None:
self.chk.fail(message_registry.UNSUPPORTED_ARG_1_FOR_SUPER, e)
return AnyType(TypeOfAny.from_error)
instance_info = type_info_from_type(instance_type)
if instance_info is None:
self.chk.fail(message_registry.UNSUPPORTED_ARG_2_FOR_SUPER, e)
return AnyType(TypeOfAny.from_error)
mro = instance_info.mro
# The base is the first MRO entry *after* type_info that has a member
# with the right name
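        # For example (an informal sketch): with classes A, B(A) and C(B), the expression
        # super(B, C()).f starts looking for "f" at A, the entry after B in C's MRO.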
index = None
if type_info in mro:
index = mro.index(type_info)
else:
method = self.chk.scope.top_function()
# Mypy explicitly allows supertype upper bounds (and no upper bound at all)
# for annotating self-types. However, if such an annotation is used for
# checking super() we will still get an error. So to be consistent, we also
# allow such imprecise annotations for use with super(), where we fall back
# to the current class MRO instead. This works only from inside a method.
if method is not None and is_self_type_like(
instance_type, is_classmethod=method.is_class
):
if e.info and type_info in e.info.mro:
mro = e.info.mro
index = mro.index(type_info)
if index is None:
if (
instance_info.is_protocol
and instance_info != type_info
and not type_info.is_protocol
):
# A special case for mixins, in this case super() should point
# directly to the host protocol, this is not safe, since the real MRO
# is not known yet for mixin, but this feature is more like an escape hatch.
index = -1
else:
self.chk.fail(message_registry.SUPER_ARG_2_NOT_INSTANCE_OF_ARG_1, e)
return AnyType(TypeOfAny.from_error)
if len(mro) == index + 1:
self.chk.fail(message_registry.TARGET_CLASS_HAS_NO_BASE_CLASS, e)
return AnyType(TypeOfAny.from_error)
for base in mro[index + 1 :]:
if e.name in base.names or base == mro[-1]:
if e.info and e.info.fallback_to_any and base == mro[-1]:
# There's an undefined base class, and we're at the end of the
# chain. That's not an error.
return AnyType(TypeOfAny.special_form)
return analyze_member_access(
name=e.name,
typ=instance_type,
is_lvalue=False,
is_super=True,
is_operator=False,
original_type=instance_type,
override_info=base,
context=e,
msg=self.msg,
chk=self.chk,
in_literal_context=self.is_literal_context(),
)
assert False, "unreachable"
def _super_arg_types(self, e: SuperExpr) -> Type | tuple[Type, Type]:
"""
Computes the types of the type and instance expressions in super(T, instance), or the
implicit ones for zero-argument super() expressions. Returns a single type for the whole
super expression when possible (for errors, anys), otherwise the pair of computed types.
"""
if not self.chk.in_checked_function():
return AnyType(TypeOfAny.unannotated)
elif len(e.call.args) == 0:
if not e.info:
# This has already been reported by the semantic analyzer.
return AnyType(TypeOfAny.from_error)
elif self.chk.scope.active_class():
self.chk.fail(message_registry.SUPER_OUTSIDE_OF_METHOD_NOT_SUPPORTED, e)
return AnyType(TypeOfAny.from_error)
# Zero-argument super() is like super(<current class>, <self>)
current_type = fill_typevars(e.info)
type_type: ProperType = TypeType(current_type)
# Use the type of the self argument, in case it was annotated
method = self.chk.scope.top_function()
assert method is not None
if method.arguments:
instance_type: Type = method.arguments[0].variable.type or current_type
else:
self.chk.fail(message_registry.SUPER_ENCLOSING_POSITIONAL_ARGS_REQUIRED, e)
return AnyType(TypeOfAny.from_error)
elif ARG_STAR in e.call.arg_kinds:
self.chk.fail(message_registry.SUPER_VARARGS_NOT_SUPPORTED, e)
return AnyType(TypeOfAny.from_error)
elif set(e.call.arg_kinds) != {ARG_POS}:
self.chk.fail(message_registry.SUPER_POSITIONAL_ARGS_REQUIRED, e)
return AnyType(TypeOfAny.from_error)
elif len(e.call.args) == 1:
self.chk.fail(message_registry.SUPER_WITH_SINGLE_ARG_NOT_SUPPORTED, e)
return AnyType(TypeOfAny.from_error)
elif len(e.call.args) == 2:
type_type = get_proper_type(self.accept(e.call.args[0]))
instance_type = self.accept(e.call.args[1])
else:
self.chk.fail(message_registry.TOO_MANY_ARGS_FOR_SUPER, e)
return AnyType(TypeOfAny.from_error)
# Imprecisely assume that the type is the current class
if isinstance(type_type, AnyType):
if e.info:
type_type = TypeType(fill_typevars(e.info))
else:
return AnyType(TypeOfAny.from_another_any, source_any=type_type)
elif isinstance(type_type, TypeType):
type_item = type_type.item
if isinstance(type_item, AnyType):
if e.info:
type_type = TypeType(fill_typevars(e.info))
else:
return AnyType(TypeOfAny.from_another_any, source_any=type_item)
if not isinstance(type_type, TypeType) and not (
isinstance(type_type, FunctionLike) and type_type.is_type_obj()
):
self.msg.first_argument_for_super_must_be_type(type_type, e)
return AnyType(TypeOfAny.from_error)
# Imprecisely assume that the instance is of the current class
instance_type = get_proper_type(instance_type)
if isinstance(instance_type, AnyType):
if e.info:
instance_type = fill_typevars(e.info)
else:
return AnyType(TypeOfAny.from_another_any, source_any=instance_type)
elif isinstance(instance_type, TypeType):
instance_item = instance_type.item
if isinstance(instance_item, AnyType):
if e.info:
instance_type = TypeType(fill_typevars(e.info))
else:
return AnyType(TypeOfAny.from_another_any, source_any=instance_item)
return type_type, instance_type
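# Illustrative sketch of the forms handled above (hypothetical user code, not
# part of this module):
#
#     class Base:
#         def f(self) -> int: ...
#     class C(Base):
#         def f(self) -> int:
#             return super().f()         # zero-arg form: treated as super(C, self)
#         def g(self) -> int:
#             return super(C, self).f()  # explicit two-argument form
#
# A bare `super(C)` or a `super(*args)` call is rejected with an error above.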
def visit_slice_expr(self, e: SliceExpr) -> Type:
try:
supports_index = self.chk.named_type("typing_extensions.SupportsIndex")
except KeyError:
supports_index = self.chk.named_type("builtins.int") # thanks, fixture life
expected = make_optional_type(supports_index)
for index in [e.begin_index, e.end_index, e.stride]:
if index:
t = self.accept(index)
self.chk.check_subtype(t, expected, index, message_registry.INVALID_SLICE_INDEX)
return self.named_type("builtins.slice")
def visit_list_comprehension(self, e: ListComprehension) -> Type:
return self.check_generator_or_comprehension(
e.generator, "builtins.list", "<list-comprehension>"
)
def visit_set_comprehension(self, e: SetComprehension) -> Type:
return self.check_generator_or_comprehension(
e.generator, "builtins.set", "<set-comprehension>"
)
def visit_generator_expr(self, e: GeneratorExpr) -> Type:
# If any of the comprehensions use async for, or await is used anywhere but in
# the leftmost sequence, the expression will return an async generator object.
if (
any(e.is_async)
or has_await_expression(e.left_expr)
or any(has_await_expression(sequence) for sequence in e.sequences[1:])
or any(has_await_expression(cond) for condlist in e.condlists for cond in condlist)
):
typ = "typing.AsyncGenerator"
# received type is always None in async generator expressions
additional_args: list[Type] = [NoneType()]
else:
typ = "typing.Generator"
# received type and returned type are None
additional_args = [NoneType(), NoneType()]
return self.check_generator_or_comprehension(
e, typ, "<generator>", additional_args=additional_args
)
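# Example of the distinction made above (hypothetical user code; `aiter_of_ints`
# is assumed to be some async iterable of ints, used inside an async function):
#
#     gen = (x + 1 for x in range(3))            # typing.Generator[int, None, None]
#     agen = (x async for x in aiter_of_ints())  # typing.AsyncGenerator[int, None]
#
# Any use of `async for` (or `await` outside the leftmost sequence) selects the
# AsyncGenerator branch.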
def check_generator_or_comprehension(
self,
gen: GeneratorExpr,
type_name: str,
id_for_messages: str,
additional_args: list[Type] | None = None,
) -> Type:
"""Type check a generator expression or a list comprehension."""
additional_args = additional_args or []
with self.chk.binder.frame_context(can_skip=True, fall_through=0):
self.check_for_comp(gen)
# Infer the type of the list comprehension by using a synthetic generic
# callable type.
tv = TypeVarType(
"T",
"T",
id=TypeVarId(-1, namespace="<genexp>"),
values=[],
upper_bound=self.object_type(),
default=AnyType(TypeOfAny.from_omitted_generics),
)
tv_list: list[Type] = [tv]
constructor = CallableType(
tv_list,
[nodes.ARG_POS],
[None],
self.chk.named_generic_type(type_name, tv_list + additional_args),
self.chk.named_type("builtins.function"),
name=id_for_messages,
variables=[tv],
)
return self.check_call(constructor, [gen.left_expr], [nodes.ARG_POS], gen)[0]
def visit_dictionary_comprehension(self, e: DictionaryComprehension) -> Type:
"""Type check a dictionary comprehension."""
with self.chk.binder.frame_context(can_skip=True, fall_through=0):
self.check_for_comp(e)
# Infer the type of the list comprehension by using a synthetic generic
# callable type.
ktdef = TypeVarType(
"KT",
"KT",
id=TypeVarId(-1, namespace="<dict>"),
values=[],
upper_bound=self.object_type(),
default=AnyType(TypeOfAny.from_omitted_generics),
)
vtdef = TypeVarType(
"VT",
"VT",
id=TypeVarId(-2, namespace="<dict>"),
values=[],
upper_bound=self.object_type(),
default=AnyType(TypeOfAny.from_omitted_generics),
)
constructor = CallableType(
[ktdef, vtdef],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
self.chk.named_generic_type("builtins.dict", [ktdef, vtdef]),
self.chk.named_type("builtins.function"),
name="<dictionary-comprehension>",
variables=[ktdef, vtdef],
)
return self.check_call(
constructor, [e.key, e.value], [nodes.ARG_POS, nodes.ARG_POS], e
)[0]
def check_for_comp(self, e: GeneratorExpr | DictionaryComprehension) -> None:
"""Check the for_comp part of comprehensions. That is the part from 'for':
... for x in y if z
Note: This adds the type information derived from the condlists to the current binder.
"""
for index, sequence, conditions, is_async in zip(
e.indices, e.sequences, e.condlists, e.is_async
):
if is_async:
_, sequence_type = self.chk.analyze_async_iterable_item_type(sequence)
else:
_, sequence_type = self.chk.analyze_iterable_item_type(sequence)
self.chk.analyze_index_variables(index, sequence_type, True, e)
for condition in conditions:
self.accept(condition)
# values are only part of the comprehension when all conditions are true
true_map, false_map = self.chk.find_isinstance_check(condition)
if true_map:
self.chk.push_type_map(true_map)
if codes.REDUNDANT_EXPR in self.chk.options.enabled_error_codes:
if true_map is None:
self.msg.redundant_condition_in_comprehension(False, condition)
elif false_map is None:
self.msg.redundant_condition_in_comprehension(True, condition)
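# Example of the narrowing applied above (hypothetical user code):
#
#     items: list[object]
#     upper = [x.upper() for x in items if isinstance(x, str)]
#
# Inside the value expression `x.upper()`, `x` has been narrowed to `str`,
# because the isinstance check from the condition was pushed onto the binder.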
def visit_conditional_expr(self, e: ConditionalExpr, allow_none_return: bool = False) -> Type:
self.accept(e.cond)
ctx = self.type_context[-1]
# Gain type information from isinstance if it is there
# but only for the current expression
if_map, else_map = self.chk.find_isinstance_check(e.cond)
if codes.REDUNDANT_EXPR in self.chk.options.enabled_error_codes:
if if_map is None:
self.msg.redundant_condition_in_if(False, e.cond)
elif else_map is None:
self.msg.redundant_condition_in_if(True, e.cond)
if_type = self.analyze_cond_branch(
if_map, e.if_expr, context=ctx, allow_none_return=allow_none_return
)
# we want to keep the narrowest value of if_type for union'ing the branches
# however, it would be silly to pass a literal as a type context. Pass the
# underlying fallback type instead.
if_type_fallback = simple_literal_type(get_proper_type(if_type)) or if_type
# Analyze the right branch using full type context and store the type
full_context_else_type = self.analyze_cond_branch(
else_map, e.else_expr, context=ctx, allow_none_return=allow_none_return
)
if not mypy.checker.is_valid_inferred_type(if_type):
# Analyze the right branch disregarding the left branch.
else_type = full_context_else_type
# we want to keep the narrowest value of else_type for union'ing the branches
# however, it would be silly to pass a literal as a type context. Pass the
# underlying fallback type instead.
else_type_fallback = simple_literal_type(get_proper_type(else_type)) or else_type
# If it would make a difference, re-analyze the left
# branch using the right branch's type as context.
if ctx is None or not is_equivalent(else_type_fallback, ctx):
# TODO: If it's possible that the previous analysis of
# the left branch produced errors that are avoided
# using this context, suppress those errors.
if_type = self.analyze_cond_branch(
if_map,
e.if_expr,
context=else_type_fallback,
allow_none_return=allow_none_return,
)
elif if_type_fallback == ctx:
# There is no point re-running the analysis if if_type is equal to ctx.
# That would be an exact duplicate of the work we just did.
# This optimization is particularly important to avoid exponential blowup with nested
# if/else expressions: https://github.com/python/mypy/issues/9591
# TODO: would checking for is_proper_subtype also work and cover more cases?
else_type = full_context_else_type
else:
# Analyze the right branch in the context of the left
# branch's type.
else_type = self.analyze_cond_branch(
else_map,
e.else_expr,
context=if_type_fallback,
allow_none_return=allow_none_return,
)
res: Type = make_simplified_union([if_type, else_type])
if has_uninhabited_component(res) and not isinstance(
get_proper_type(self.type_context[-1]), UnionType
):
# In rare cases with empty collections join may give a better result.
alternative = join.join_types(if_type, else_type)
p_alt = get_proper_type(alternative)
if not isinstance(p_alt, Instance) or p_alt.type.fullname != "builtins.object":
res = alternative
return res
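# Example of the behaviour above (hypothetical user code):
#
#     x: int | None
#     y = x if x is not None else 0
#
# The if-branch is checked with `x` narrowed to `int`, the else-branch with the
# complementary narrowing, and the expression type is the union of the two
# branch types (here simply `int`).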
def analyze_cond_branch(
self,
map: dict[Expression, Type] | None,
node: Expression,
context: Type | None,
allow_none_return: bool = False,
) -> Type:
with self.chk.binder.frame_context(can_skip=True, fall_through=0):
if map is None:
# We still need to type check node, in case we want to
# process it for isinstance checks later
self.accept(node, type_context=context, allow_none_return=allow_none_return)
return UninhabitedType()
self.chk.push_type_map(map)
return self.accept(node, type_context=context, allow_none_return=allow_none_return)
#
# Helpers
#
def accept(
self,
node: Expression,
type_context: Type | None = None,
allow_none_return: bool = False,
always_allow_any: bool = False,
is_callee: bool = False,
) -> Type:
"""Type check a node in the given type context. If allow_none_return
is True and this expression is a call, allow it to return None. This
applies only to this expression and not any subexpressions.
"""
if node in self.type_overrides:
# This branch is very fast, there is no point timing it.
return self.type_overrides[node]
# We don't use context manager here to get most precise data (and avoid overhead).
record_time = False
if self.collect_line_checking_stats and not self.in_expression:
t0 = time.perf_counter_ns()
self.in_expression = True
record_time = True
self.type_context.append(type_context)
old_is_callee = self.is_callee
self.is_callee = is_callee
try:
if allow_none_return and isinstance(node, CallExpr):
typ = self.visit_call_expr(node, allow_none_return=True)
elif allow_none_return and isinstance(node, YieldFromExpr):
typ = self.visit_yield_from_expr(node, allow_none_return=True)
elif allow_none_return and isinstance(node, ConditionalExpr):
typ = self.visit_conditional_expr(node, allow_none_return=True)
elif allow_none_return and isinstance(node, AwaitExpr):
typ = self.visit_await_expr(node, allow_none_return=True)
else:
typ = node.accept(self)
except Exception as err:
report_internal_error(
err, self.chk.errors.file, node.line, self.chk.errors, self.chk.options
)
self.is_callee = old_is_callee
self.type_context.pop()
assert typ is not None
self.chk.store_type(node, typ)
if (
self.chk.options.disallow_any_expr
and not always_allow_any
and not self.chk.is_stub
and self.chk.in_checked_function()
and has_any_type(typ)
and not self.chk.current_node_deferred
):
self.msg.disallowed_any_type(typ, node)
if not self.chk.in_checked_function() or self.chk.current_node_deferred:
result: Type = AnyType(TypeOfAny.unannotated)
else:
result = typ
if record_time:
self.per_line_checking_time_ns[node.line] += time.perf_counter_ns() - t0
self.in_expression = False
return result
def named_type(self, name: str) -> Instance:
"""Return an instance type with type given by the name and no type
arguments. Alias for TypeChecker.named_type.
"""
return self.chk.named_type(name)
def type_alias_type_type(self) -> Instance:
"""Returns a `typing.TypeAliasType` or `typing_extensions.TypeAliasType`."""
if self.chk.options.python_version >= (3, 12):
return self.named_type("typing.TypeAliasType")
return self.named_type("typing_extensions.TypeAliasType")
def is_valid_var_arg(self, typ: Type) -> bool:
"""Is a type valid as a *args argument?"""
typ = get_proper_type(typ)
return isinstance(typ, (TupleType, AnyType, ParamSpecType, UnpackType)) or is_subtype(
typ, self.chk.named_generic_type("typing.Iterable", [AnyType(TypeOfAny.special_form)])
)
def is_valid_keyword_var_arg(self, typ: Type) -> bool:
"""Is a type valid as a **kwargs argument?"""
return (
is_subtype(
typ,
self.chk.named_generic_type(
"_typeshed.SupportsKeysAndGetItem",
[self.named_type("builtins.str"), AnyType(TypeOfAny.special_form)],
),
)
or is_subtype(
typ,
self.chk.named_generic_type(
"_typeshed.SupportsKeysAndGetItem", [UninhabitedType(), UninhabitedType()]
),
)
or isinstance(typ, ParamSpecType)
)
def has_member(self, typ: Type, member: str) -> bool:
"""Does type have member with the given name?"""
# TODO: refactor this to use checkmember.analyze_member_access, otherwise
# these two should be carefully kept in sync.
# This is much faster than analyze_member_access, though, and so using
# it first as a filter is important for performance.
typ = get_proper_type(typ)
if isinstance(typ, TypeVarType):
typ = get_proper_type(typ.upper_bound)
if isinstance(typ, TupleType):
typ = tuple_fallback(typ)
if isinstance(typ, LiteralType):
typ = typ.fallback
if isinstance(typ, Instance):
return typ.type.has_readable_member(member)
if isinstance(typ, FunctionLike) and typ.is_type_obj():
return typ.fallback.type.has_readable_member(member)
elif isinstance(typ, AnyType):
return True
elif isinstance(typ, UnionType):
result = all(self.has_member(x, member) for x in typ.relevant_items())
return result
elif isinstance(typ, TypeType):
# Type[Union[X, ...]] is always normalized to Union[Type[X], ...],
# so we don't need to care about unions here.
item = typ.item
if isinstance(item, TypeVarType):
item = get_proper_type(item.upper_bound)
if isinstance(item, TupleType):
item = tuple_fallback(item)
if isinstance(item, Instance) and item.type.metaclass_type is not None:
return self.has_member(item.type.metaclass_type, member)
if isinstance(item, AnyType):
return True
return False
else:
return False
def not_ready_callback(self, name: str, context: Context) -> None:
"""Called when we can't infer the type of a variable because it's not ready yet.
Either defer type checking of the enclosing function to the next
pass or report an error.
"""
self.chk.handle_cannot_determine_type(name, context)
def visit_yield_expr(self, e: YieldExpr) -> Type:
return_type = self.chk.return_types[-1]
expected_item_type = self.chk.get_generator_yield_type(return_type, False)
if e.expr is None:
if (
not isinstance(get_proper_type(expected_item_type), (NoneType, AnyType))
and self.chk.in_checked_function()
):
self.chk.fail(message_registry.YIELD_VALUE_EXPECTED, e)
else:
actual_item_type = self.accept(e.expr, expected_item_type)
self.chk.check_subtype(
actual_item_type,
expected_item_type,
e,
message_registry.INCOMPATIBLE_TYPES_IN_YIELD,
"actual type",
"expected type",
)
return self.chk.get_generator_receive_type(return_type, False)
def visit_await_expr(self, e: AwaitExpr, allow_none_return: bool = False) -> Type:
expected_type = self.type_context[-1]
if expected_type is not None:
expected_type = self.chk.named_generic_type("typing.Awaitable", [expected_type])
actual_type = get_proper_type(self.accept(e.expr, expected_type))
if isinstance(actual_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=actual_type)
ret = self.check_awaitable_expr(
actual_type, e, message_registry.INCOMPATIBLE_TYPES_IN_AWAIT
)
if not allow_none_return and isinstance(get_proper_type(ret), NoneType):
self.chk.msg.does_not_return_value(None, e)
return ret
def check_awaitable_expr(
self, t: Type, ctx: Context, msg: str | ErrorMessage, ignore_binder: bool = False
) -> Type:
"""Check the argument to `await` and extract the type of value.
Also used by `async for` and `async with`.
"""
if not self.chk.check_subtype(
t, self.named_type("typing.Awaitable"), ctx, msg, "actual type", "expected type"
):
return AnyType(TypeOfAny.special_form)
else:
generator = self.check_method_call_by_name("__await__", t, [], [], ctx)[0]
ret_type = self.chk.get_generator_return_type(generator, False)
ret_type = get_proper_type(ret_type)
if (
not ignore_binder
and isinstance(ret_type, UninhabitedType)
and not ret_type.ambiguous
):
self.chk.binder.unreachable()
return ret_type
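# Example of what is accepted here (hypothetical user code):
#
#     async def fetch() -> int: ...
#     async def caller() -> None:
#         n = await fetch()   # fetch() is Awaitable[int]; `n` is int
#
# The awaited value must be a subtype of typing.Awaitable; the result type is
# recovered from the generator returned by its `__await__` method.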
def visit_yield_from_expr(self, e: YieldFromExpr, allow_none_return: bool = False) -> Type:
# NOTE: Whether `yield from` accepts an `async def` decorated
# with `@types.coroutine` (or `@asyncio.coroutine`) depends on
# whether the generator containing the `yield from` is itself
# thus decorated. But it accepts a generator regardless of
# how it's decorated.
return_type = self.chk.return_types[-1]
# TODO: What should the context for the sub-expression be?
# If the containing function has type Generator[X, Y, ...],
# the context should be Generator[X, Y, T], where T is the
# context of the 'yield from' itself (but it isn't known).
subexpr_type = get_proper_type(self.accept(e.expr))
# Check that the expr is an instance of Iterable and get the type of the iterator produced
# by __iter__.
if isinstance(subexpr_type, AnyType):
iter_type: Type = AnyType(TypeOfAny.from_another_any, source_any=subexpr_type)
elif self.chk.type_is_iterable(subexpr_type):
if is_async_def(subexpr_type) and not has_coroutine_decorator(return_type):
self.chk.msg.yield_from_invalid_operand_type(subexpr_type, e)
any_type = AnyType(TypeOfAny.special_form)
generic_generator_type = self.chk.named_generic_type(
"typing.Generator", [any_type, any_type, any_type]
)
iter_type, _ = self.check_method_call_by_name(
"__iter__", subexpr_type, [], [], context=generic_generator_type
)
else:
if not (is_async_def(subexpr_type) and has_coroutine_decorator(return_type)):
self.chk.msg.yield_from_invalid_operand_type(subexpr_type, e)
iter_type = AnyType(TypeOfAny.from_error)
else:
iter_type = self.check_awaitable_expr(
subexpr_type, e, message_registry.INCOMPATIBLE_TYPES_IN_YIELD_FROM
)
# Check that the iterator's item type matches the type yielded by the Generator function
# containing this `yield from` expression.
expected_item_type = self.chk.get_generator_yield_type(return_type, False)
actual_item_type = self.chk.get_generator_yield_type(iter_type, False)
self.chk.check_subtype(
actual_item_type,
expected_item_type,
e,
message_registry.INCOMPATIBLE_TYPES_IN_YIELD_FROM,
"actual type",
"expected type",
)
# Determine the type of the entire yield from expression.
iter_type = get_proper_type(iter_type)
expr_type = self.chk.get_generator_return_type(iter_type, is_coroutine=False)
if not allow_none_return and isinstance(get_proper_type(expr_type), NoneType):
self.chk.msg.does_not_return_value(None, e)
return expr_type
def visit_temp_node(self, e: TempNode) -> Type:
return e.type
def visit_type_var_expr(self, e: TypeVarExpr) -> Type:
p_default = get_proper_type(e.default)
if not (
isinstance(p_default, AnyType)
and p_default.type_of_any == TypeOfAny.from_omitted_generics
):
if not is_subtype(p_default, e.upper_bound):
self.chk.fail("TypeVar default must be a subtype of the bound type", e)
if e.values and not any(p_default == value for value in e.values):
self.chk.fail("TypeVar default must be one of the constraint types", e)
return AnyType(TypeOfAny.special_form)
def visit_paramspec_expr(self, e: ParamSpecExpr) -> Type:
return AnyType(TypeOfAny.special_form)
def visit_type_var_tuple_expr(self, e: TypeVarTupleExpr) -> Type:
return AnyType(TypeOfAny.special_form)
def visit_newtype_expr(self, e: NewTypeExpr) -> Type:
return AnyType(TypeOfAny.special_form)
def visit_namedtuple_expr(self, e: NamedTupleExpr) -> Type:
tuple_type = e.info.tuple_type
if tuple_type:
if self.chk.options.disallow_any_unimported and has_any_from_unimported_type(
tuple_type
):
self.msg.unimported_type_becomes_any("NamedTuple type", tuple_type, e)
check_for_explicit_any(
tuple_type, self.chk.options, self.chk.is_typeshed_stub, self.msg, context=e
)
return AnyType(TypeOfAny.special_form)
def visit_enum_call_expr(self, e: EnumCallExpr) -> Type:
for name, value in zip(e.items, e.values):
if value is not None:
typ = self.accept(value)
if not isinstance(get_proper_type(typ), AnyType):
var = e.info.names[name].node
if isinstance(var, Var):
# Inline TypeChecker.set_inferred_type(),
# without the lvalue. (This doesn't really do
# much, since the value attribute is defined
# to have type Any in the typeshed stub.)
var.type = typ
var.is_inferred = True
return AnyType(TypeOfAny.special_form)
def visit_typeddict_expr(self, e: TypedDictExpr) -> Type:
return AnyType(TypeOfAny.special_form)
def visit__promote_expr(self, e: PromoteExpr) -> Type:
return e.type
def visit_star_expr(self, e: StarExpr) -> Type:
# TODO: should this ever be called (see e.g. mypyc visitor)?
return self.accept(e.expr)
def object_type(self) -> Instance:
"""Return instance type 'object'."""
return self.named_type("builtins.object")
def bool_type(self) -> Instance:
"""Return instance type 'bool'."""
return self.named_type("builtins.bool")
@overload
def narrow_type_from_binder(self, expr: Expression, known_type: Type) -> Type: ...
@overload
def narrow_type_from_binder(
self, expr: Expression, known_type: Type, skip_non_overlapping: bool
) -> Type | None: ...
def narrow_type_from_binder(
self, expr: Expression, known_type: Type, skip_non_overlapping: bool = False
) -> Type | None:
"""Narrow down a known type of expression using information in conditional type binder.
If 'skip_non_overlapping' is True, return None if the type and restriction are
non-overlapping.
"""
if literal(expr) >= LITERAL_TYPE:
restriction = self.chk.binder.get(expr)
# If the current node is deferred, some variables may get Any types that they
# otherwise wouldn't have. We don't want to narrow down these since it may
# produce invalid inferred Optional[Any] types, at least.
if restriction and not (
isinstance(get_proper_type(known_type), AnyType) and self.chk.current_node_deferred
):
# Note: this call should match the one in narrow_declared_type().
if skip_non_overlapping and not is_overlapping_types(
known_type, restriction, prohibit_none_typevar_overlap=True
):
return None
return narrow_declared_type(known_type, restriction)
return known_type
def has_abstract_type_part(self, caller_type: ProperType, callee_type: ProperType) -> bool:
# TODO: support other possible types here
if isinstance(caller_type, TupleType) and isinstance(callee_type, TupleType):
return any(
self.has_abstract_type(get_proper_type(caller), get_proper_type(callee))
for caller, callee in zip(caller_type.items, callee_type.items)
)
return self.has_abstract_type(caller_type, callee_type)
def has_abstract_type(self, caller_type: ProperType, callee_type: ProperType) -> bool:
return (
isinstance(caller_type, FunctionLike)
and isinstance(callee_type, TypeType)
and caller_type.is_type_obj()
and (caller_type.type_object().is_abstract or caller_type.type_object().is_protocol)
and isinstance(callee_type.item, Instance)
and (callee_type.item.type.is_abstract or callee_type.item.type.is_protocol)
and not self.chk.allow_abstract_call
)
def has_any_type(t: Type, ignore_in_type_obj: bool = False) -> bool:
"""Whether t contains an Any type"""
return t.accept(HasAnyType(ignore_in_type_obj))
class HasAnyType(types.BoolTypeQuery):
def __init__(self, ignore_in_type_obj: bool) -> None:
super().__init__(types.ANY_STRATEGY)
self.ignore_in_type_obj = ignore_in_type_obj
def visit_any(self, t: AnyType) -> bool:
return t.type_of_any != TypeOfAny.special_form # special forms are not real Any types
def visit_callable_type(self, t: CallableType) -> bool:
if self.ignore_in_type_obj and t.is_type_obj():
return False
return super().visit_callable_type(t)
def visit_type_var(self, t: TypeVarType) -> bool:
default = [t.default] if t.has_default() else []
return self.query_types([t.upper_bound, *default] + t.values)
def visit_param_spec(self, t: ParamSpecType) -> bool:
default = [t.default] if t.has_default() else []
return self.query_types([t.upper_bound, *default])
def visit_type_var_tuple(self, t: TypeVarTupleType) -> bool:
default = [t.default] if t.has_default() else []
return self.query_types([t.upper_bound, *default])
def has_coroutine_decorator(t: Type) -> bool:
"""Whether t came from a function decorated with `@coroutine`."""
t = get_proper_type(t)
return isinstance(t, Instance) and t.type.fullname == "typing.AwaitableGenerator"
def is_async_def(t: Type) -> bool:
"""Whether t came from a function defined using `async def`."""
# In check_func_def(), when we see a function decorated with
# `@typing.coroutine` or `@asyncio.coroutine`, we change the
# return type to typing.AwaitableGenerator[...], so that its
# type is compatible with either Generator or Awaitable.
# But for the check here we need to know whether the original
# function (before decoration) was an `async def`. The
# AwaitableGenerator type conveniently preserves the original
# type as its 4th parameter (3rd when using 0-origin indexing
# :-), so that we can recover that information here.
# (We really need to see whether the original, undecorated
# function was an `async def`, which is orthogonal to its
# decorations.)
t = get_proper_type(t)
if (
isinstance(t, Instance)
and t.type.fullname == "typing.AwaitableGenerator"
and len(t.args) >= 4
):
t = get_proper_type(t.args[3])
return isinstance(t, Instance) and t.type.fullname == "typing.Coroutine"
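# Rough sketch of the shape relied on above (parameter names are illustrative,
# not the exact typeshed spelling):
#
#     AwaitableGenerator[YieldType, SendType, ReturnType, OriginalFuncType]
#
# For an `async def` function the fourth argument is a typing.Coroutine type,
# which is what the isinstance check at the end recovers.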
def is_non_empty_tuple(t: Type) -> bool:
t = get_proper_type(t)
return isinstance(t, TupleType) and bool(t.items)
def is_duplicate_mapping(
mapping: list[int], actual_types: list[Type], actual_kinds: list[ArgKind]
) -> bool:
return (
len(mapping) > 1
# Multiple actuals can map to the same formal if they both come from
# varargs (*args and **kwargs); in this case at runtime it is possible
# that there are no duplicates. We need to allow this, as the convention
# f(..., *args, **kwargs) is common enough.
and not (
len(mapping) == 2
and actual_kinds[mapping[0]] == nodes.ARG_STAR
and actual_kinds[mapping[1]] == nodes.ARG_STAR2
)
# Multiple actuals can map to the same formal if there are multiple
# **kwargs which cannot be mapped with certainty (non-TypedDict
# **kwargs).
and not all(
actual_kinds[m] == nodes.ARG_STAR2
and not isinstance(get_proper_type(actual_types[m]), TypedDictType)
for m in mapping
)
)
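# Example of the convention this allows (hypothetical user code):
#
#     def wrapper(*args, **kwargs):
#         return wrapped(*args, **kwargs)
#
# Both `*args` and `**kwargs` can map to the same formal parameter of `wrapped`
# without being reported as a duplicate argument.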
def replace_callable_return_type(c: CallableType, new_ret_type: Type) -> CallableType:
"""Return a copy of a callable type with a different return type."""
return c.copy_modified(ret_type=new_ret_type)
class ArgInferSecondPassQuery(types.BoolTypeQuery):
"""Query whether an argument type should be inferred in the second pass.
The result is True if the type has a type variable in a callable return
type anywhere. For example, the result for Callable[[], T] is True if t is
a type variable.
"""
def __init__(self) -> None:
super().__init__(types.ANY_STRATEGY)
def visit_callable_type(self, t: CallableType) -> bool:
# TODO: we need to check only for type variables of original callable.
return self.query_types(t.arg_types) or t.accept(HasTypeVarQuery())
class HasTypeVarQuery(types.BoolTypeQuery):
"""Visitor for querying whether a type has a type variable component."""
def __init__(self) -> None:
super().__init__(types.ANY_STRATEGY)
def visit_type_var(self, t: TypeVarType) -> bool:
return True
def visit_param_spec(self, t: ParamSpecType) -> bool:
return True
def visit_type_var_tuple(self, t: TypeVarTupleType) -> bool:
return True
def has_erased_component(t: Type | None) -> bool:
return t is not None and t.accept(HasErasedComponentsQuery())
class HasErasedComponentsQuery(types.BoolTypeQuery):
"""Visitor for querying whether a type has an erased component."""
def __init__(self) -> None:
super().__init__(types.ANY_STRATEGY)
def visit_erased_type(self, t: ErasedType) -> bool:
return True
def has_uninhabited_component(t: Type | None) -> bool:
return t is not None and t.accept(HasUninhabitedComponentsQuery())
class HasUninhabitedComponentsQuery(types.BoolTypeQuery):
"""Visitor for querying whether a type has an UninhabitedType component."""
def __init__(self) -> None:
super().__init__(types.ANY_STRATEGY)
def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
return True
def arg_approximate_similarity(actual: Type, formal: Type) -> bool:
"""Return if caller argument (actual) is roughly compatible with signature arg (formal).
This function is deliberately loose and will report two types are similar
as long as their "shapes" are plausibly the same.
This is useful when we're doing error reporting: for example, if we're trying
to select an overload alternative and there's no exact match, we can use
this function to help us identify which alternative the user might have
*meant* to match.
"""
actual = get_proper_type(actual)
formal = get_proper_type(formal)
# Erase typevars: we'll consider them all to have the same "shape".
if isinstance(actual, TypeVarType):
actual = erase_to_union_or_bound(actual)
if isinstance(formal, TypeVarType):
formal = erase_to_union_or_bound(formal)
# Callable or Type[...]-ish types
def is_typetype_like(typ: ProperType) -> bool:
return (
isinstance(typ, TypeType)
or (isinstance(typ, FunctionLike) and typ.is_type_obj())
or (isinstance(typ, Instance) and typ.type.fullname == "builtins.type")
)
if isinstance(formal, CallableType):
if isinstance(actual, (CallableType, Overloaded, TypeType)):
return True
if is_typetype_like(actual) and is_typetype_like(formal):
return True
# Unions
if isinstance(actual, UnionType):
return any(arg_approximate_similarity(item, formal) for item in actual.relevant_items())
if isinstance(formal, UnionType):
return any(arg_approximate_similarity(actual, item) for item in formal.relevant_items())
# TypedDicts
if isinstance(actual, TypedDictType):
if isinstance(formal, TypedDictType):
return True
return arg_approximate_similarity(actual.fallback, formal)
# Instances
# For instances, we mostly defer to the existing is_subtype check.
if isinstance(formal, Instance):
if isinstance(actual, CallableType):
actual = actual.fallback
if isinstance(actual, Overloaded):
actual = actual.items[0].fallback
if isinstance(actual, TupleType):
actual = tuple_fallback(actual)
if isinstance(actual, Instance) and formal.type in actual.type.mro:
# Try performing a quick check as an optimization
return True
# Fall back to a standard subtype check for the remaining kinds of type.
return is_subtype(erasetype.erase_type(actual), erasetype.erase_type(formal))
def any_causes_overload_ambiguity(
items: list[CallableType],
return_types: list[Type],
arg_types: list[Type],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
) -> bool:
"""May an argument containing 'Any' cause ambiguous result type on call to overloaded function?
Note that this sometimes returns True even if there is no ambiguity, since a correct
implementation would be complex (and the call would be imprecisely typed due to Any
types anyway).
Args:
items: Overload items matching the actual arguments
return_types: Return types of the overload items
arg_types: Actual argument types
arg_kinds: Actual argument kinds
arg_names: Actual argument names
"""
if all_same_types(return_types):
return False
actual_to_formal = [
map_formals_to_actuals(
arg_kinds, arg_names, item.arg_kinds, item.arg_names, lambda i: arg_types[i]
)
for item in items
]
for arg_idx, arg_type in enumerate(arg_types):
# We ignore Anys in type object callables as ambiguity
# creators, since that can lead to falsely claiming ambiguity
# for overloads between Type and Callable.
if has_any_type(arg_type, ignore_in_type_obj=True):
matching_formals_unfiltered = [
(item_idx, lookup[arg_idx])
for item_idx, lookup in enumerate(actual_to_formal)
if lookup[arg_idx]
]
matching_returns = []
matching_formals = []
for item_idx, formals in matching_formals_unfiltered:
matched_callable = items[item_idx]
matching_returns.append(matched_callable.ret_type)
# Note: if an actual maps to multiple formals of differing types within
# a single callable, then we know at least one of those formals must be
# a different type than the formal(s) in some other callable.
# So it's safe to just append everything to the same list.
for formal in formals:
matching_formals.append(matched_callable.arg_types[formal])
if not all_same_types(matching_formals) and not all_same_types(matching_returns):
# Any maps to multiple different types, and the return types of these items differ.
return True
return False
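# Example of the ambiguity detected above (hypothetical user code, assuming
# `overload` and `Any` are imported from typing):
#
#     @overload
#     def f(x: int) -> int: ...
#     @overload
#     def f(x: str) -> str: ...
#
#     y: Any
#     f(y)  # the Any argument matches both items, whose return types differ,
#           # so the result is treated as ambiguous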
def all_same_types(types: list[Type]) -> bool:
if not types:
return True
return all(is_same_type(t, types[0]) for t in types[1:])
def merge_typevars_in_callables_by_name(
callables: Sequence[CallableType],
) -> tuple[list[CallableType], list[TypeVarType]]:
"""Takes all the typevars present in the callables and 'combines' the ones with the same name.
For example, suppose we have two callables with signatures "f(x: T, y: S) -> T" and
"f(x: List[Tuple[T, S]]) -> Tuple[T, S]". Both callables use typevars named "T" and
"S", but we treat them as distinct, unrelated typevars. (E.g. they could both have
distinct ids.)
If we pass in both callables into this function, it returns a list containing two
new callables that are identical in signature, but use the same underlying TypeVarType
for T and S.
This is useful if we want to take the output lists and "merge" them into one callable
in some way -- for example, when unioning together overloads.
Returns both the new list of callables and a list of all distinct TypeVarType objects used.
"""
output: list[CallableType] = []
unique_typevars: dict[str, TypeVarType] = {}
variables: list[TypeVarType] = []
for target in callables:
if target.is_generic():
target = freshen_function_type_vars(target)
rename = {} # Dict[TypeVarId, TypeVar]
for tv in target.variables:
name = tv.fullname
if name not in unique_typevars:
# TODO: support ParamSpecType and TypeVarTuple.
if isinstance(tv, (ParamSpecType, TypeVarTupleType)):
continue
assert isinstance(tv, TypeVarType)
unique_typevars[name] = tv
variables.append(tv)
rename[tv.id] = unique_typevars[name]
target = expand_type(target, rename)
output.append(target)
return output, variables
def try_getting_literal(typ: Type) -> ProperType:
"""If possible, get a more precise literal type for a given type."""
typ = get_proper_type(typ)
if isinstance(typ, Instance) and typ.last_known_value is not None:
return typ.last_known_value
return typ
def is_expr_literal_type(node: Expression) -> bool:
"""Returns 'true' if the given node is a Literal"""
if isinstance(node, IndexExpr):
base = node.base
return isinstance(base, RefExpr) and base.fullname in LITERAL_TYPE_NAMES
if isinstance(node, NameExpr):
underlying = node.node
return isinstance(underlying, TypeAlias) and isinstance(
get_proper_type(underlying.target), LiteralType
)
return False
def has_bytes_component(typ: Type) -> bool:
"""Is this one of builtin byte types, or a union that contains it?"""
typ = get_proper_type(typ)
byte_types = {"builtins.bytes", "builtins.bytearray"}
if isinstance(typ, UnionType):
return any(has_bytes_component(t) for t in typ.items)
if isinstance(typ, Instance) and typ.type.fullname in byte_types:
return True
return False
def type_info_from_type(typ: Type) -> TypeInfo | None:
"""Gets the TypeInfo for a type, indirecting through things like type variables and tuples."""
typ = get_proper_type(typ)
if isinstance(typ, FunctionLike) and typ.is_type_obj():
return typ.type_object()
if isinstance(typ, TypeType):
typ = typ.item
if isinstance(typ, TypeVarType):
typ = get_proper_type(typ.upper_bound)
if isinstance(typ, TupleType):
typ = tuple_fallback(typ)
if isinstance(typ, Instance):
return typ.type
# A complicated type. Too tricky, give up.
# TODO: Do something more clever here.
return None
def is_operator_method(fullname: str | None) -> bool:
if not fullname:
return False
short_name = fullname.split(".")[-1]
return (
short_name in operators.op_methods.values()
or short_name in operators.reverse_op_methods.values()
or short_name in operators.unary_op_methods.values()
)
def get_partial_instance_type(t: Type | None) -> PartialType | None:
if t is None or not isinstance(t, PartialType) or t.type is None:
return None
return t
def is_type_type_context(context: Type | None) -> bool:
context = get_proper_type(context)
if isinstance(context, TypeType):
return True
if isinstance(context, UnionType):
return any(is_type_type_context(item) for item in context.items)
return False
| algorandfoundation/puya | src/puyapy/_vendor/mypy/checkexpr.py | Python | NOASSERTION | 289,197
"""Type checking of attribute access"""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, Sequence, cast
from mypy import meet, message_registry, subtypes
from mypy.erasetype import erase_typevars
from mypy.expandtype import (
expand_self_type,
expand_type_by_instance,
freshen_all_functions_type_vars,
)
from mypy.maptype import map_instance_to_supertype
from mypy.messages import MessageBuilder
from mypy.nodes import (
ARG_POS,
ARG_STAR,
ARG_STAR2,
SYMBOL_FUNCBASE_TYPES,
Context,
Decorator,
FuncBase,
FuncDef,
IndexExpr,
MypyFile,
NameExpr,
OverloadedFuncDef,
SymbolNode,
SymbolTable,
TempNode,
TypeAlias,
TypeInfo,
TypeVarExpr,
Var,
is_final_node,
)
from mypy.plugin import AttributeContext
from mypy.typeops import (
bind_self,
class_callable,
erase_to_bound,
function_type,
get_type_vars,
make_simplified_union,
supported_self_type,
tuple_fallback,
type_object_type_from_function,
)
from mypy.types import (
ENUM_REMOVED_PROPS,
AnyType,
CallableType,
DeletedType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypedDictType,
TypeOfAny,
TypeType,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
UnionType,
get_proper_type,
)
from mypy.typetraverser import TypeTraverserVisitor
if TYPE_CHECKING: # import for forward declaration only
import mypy.checker
from mypy import state
class MemberContext:
"""Information and objects needed to type check attribute access.
Look at the docstring of analyze_member_access for more information.
"""
def __init__(
self,
*,
is_lvalue: bool,
is_super: bool,
is_operator: bool,
original_type: Type,
context: Context,
msg: MessageBuilder,
chk: mypy.checker.TypeChecker,
self_type: Type | None,
module_symbol_table: SymbolTable | None = None,
no_deferral: bool = False,
is_self: bool = False,
) -> None:
self.is_lvalue = is_lvalue
self.is_super = is_super
self.is_operator = is_operator
self.original_type = original_type
self.self_type = self_type or original_type
self.context = context # Error context
self.msg = msg
self.chk = chk
self.module_symbol_table = module_symbol_table
self.no_deferral = no_deferral
self.is_self = is_self
def named_type(self, name: str) -> Instance:
return self.chk.named_type(name)
def not_ready_callback(self, name: str, context: Context) -> None:
self.chk.handle_cannot_determine_type(name, context)
def copy_modified(
self,
*,
messages: MessageBuilder | None = None,
self_type: Type | None = None,
is_lvalue: bool | None = None,
original_type: Type | None = None,
) -> MemberContext:
mx = MemberContext(
is_lvalue=self.is_lvalue,
is_super=self.is_super,
is_operator=self.is_operator,
original_type=self.original_type,
context=self.context,
msg=self.msg,
chk=self.chk,
self_type=self.self_type,
module_symbol_table=self.module_symbol_table,
no_deferral=self.no_deferral,
)
if messages is not None:
mx.msg = messages
if self_type is not None:
mx.self_type = self_type
if is_lvalue is not None:
mx.is_lvalue = is_lvalue
if original_type is not None:
mx.original_type = original_type
return mx
def analyze_member_access(
name: str,
typ: Type,
context: Context,
*,
is_lvalue: bool,
is_super: bool,
is_operator: bool,
msg: MessageBuilder,
original_type: Type,
chk: mypy.checker.TypeChecker,
override_info: TypeInfo | None = None,
in_literal_context: bool = False,
self_type: Type | None = None,
module_symbol_table: SymbolTable | None = None,
no_deferral: bool = False,
is_self: bool = False,
) -> Type:
"""Return the type of attribute 'name' of 'typ'.
The actual implementation is in '_analyze_member_access' and this docstring
also applies to it.
This is a general operation that supports several variations:
1. lvalue or non-lvalue access (setter or getter access)
2. supertype access when using super() (is_super == True and
'override_info' should refer to the supertype)
'original_type' is the most precise inferred or declared type of the base object
that we have available. When looking for an attribute of 'typ', we may perform
recursive calls targeting the fallback type, and 'typ' may become some supertype
of 'original_type'. 'original_type' is always preserved as the 'typ' type used in
the initial, non-recursive call. The 'self_type' is a component of 'original_type'
to which generic self should be bound (a narrower type that has a fallback to instance).
Currently this is used only for union types.
'module_symbol_table' is passed to this function if 'typ' is actually a module
and we want to keep track of the available attributes of the module (since they
are not available via the type object directly)
"""
mx = MemberContext(
is_lvalue=is_lvalue,
is_super=is_super,
is_operator=is_operator,
original_type=original_type,
context=context,
msg=msg,
chk=chk,
self_type=self_type,
module_symbol_table=module_symbol_table,
no_deferral=no_deferral,
is_self=is_self,
)
result = _analyze_member_access(name, typ, mx, override_info)
possible_literal = get_proper_type(result)
if (
in_literal_context
and isinstance(possible_literal, Instance)
and possible_literal.last_known_value is not None
):
return possible_literal.last_known_value
else:
return result
def _analyze_member_access(
name: str, typ: Type, mx: MemberContext, override_info: TypeInfo | None = None
) -> Type:
# TODO: This and following functions share some logic with subtypes.find_member;
# consider refactoring.
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return analyze_instance_member_access(name, typ, mx, override_info)
elif isinstance(typ, AnyType):
# The base object has dynamic type.
return AnyType(TypeOfAny.from_another_any, source_any=typ)
elif isinstance(typ, UnionType):
return analyze_union_member_access(name, typ, mx)
elif isinstance(typ, FunctionLike) and typ.is_type_obj():
return analyze_type_callable_member_access(name, typ, mx)
elif isinstance(typ, TypeType):
return analyze_type_type_member_access(name, typ, mx, override_info)
elif isinstance(typ, TupleType):
# Actually look up from the fallback instance type.
return _analyze_member_access(name, tuple_fallback(typ), mx, override_info)
elif isinstance(typ, (LiteralType, FunctionLike)):
# Actually look up from the fallback instance type.
return _analyze_member_access(name, typ.fallback, mx, override_info)
elif isinstance(typ, TypedDictType):
return analyze_typeddict_access(name, typ, mx, override_info)
elif isinstance(typ, NoneType):
return analyze_none_member_access(name, typ, mx)
elif isinstance(typ, TypeVarLikeType):
if isinstance(typ, TypeVarType) and typ.values:
return _analyze_member_access(
name, make_simplified_union(typ.values), mx, override_info
)
return _analyze_member_access(name, typ.upper_bound, mx, override_info)
elif isinstance(typ, DeletedType):
mx.msg.deleted_as_rvalue(typ, mx.context)
return AnyType(TypeOfAny.from_error)
return report_missing_attribute(mx.original_type, typ, name, mx)
def may_be_awaitable_attribute(
name: str, typ: Type, mx: MemberContext, override_info: TypeInfo | None = None
) -> bool:
"""Check if the given type has the attribute when awaited."""
if mx.chk.checking_missing_await:
# Avoid infinite recursion.
return False
with mx.chk.checking_await_set(), mx.msg.filter_errors() as local_errors:
aw_type = mx.chk.get_precise_awaitable_type(typ, local_errors)
if aw_type is None:
return False
_ = _analyze_member_access(name, aw_type, mx, override_info)
return not local_errors.has_new_errors()
def report_missing_attribute(
original_type: Type,
typ: Type,
name: str,
mx: MemberContext,
override_info: TypeInfo | None = None,
) -> Type:
error_code = mx.msg.has_no_attr(original_type, typ, name, mx.context, mx.module_symbol_table)
if not mx.msg.prefer_simple_messages():
if may_be_awaitable_attribute(name, typ, mx, override_info):
mx.msg.possible_missing_await(mx.context, error_code)
return AnyType(TypeOfAny.from_error)
# The several functions that follow implement analyze_member_access for various
# types and aren't documented individually.
def analyze_instance_member_access(
name: str, typ: Instance, mx: MemberContext, override_info: TypeInfo | None
) -> Type:
if name == "__init__" and not mx.is_super:
# Accessing __init__ in statically typed code would compromise
# type safety unless used via super().
mx.msg.fail(message_registry.CANNOT_ACCESS_INIT, mx.context)
return AnyType(TypeOfAny.from_error)
# The base object has an instance type.
info = typ.type
if override_info:
info = override_info
if (
state.find_occurrences
and info.name == state.find_occurrences[0]
and name == state.find_occurrences[1]
):
mx.msg.note("Occurrence of '{}.{}'".format(*state.find_occurrences), mx.context)
# Look up the member. First look up the method dictionary.
method = info.get_method(name)
if method and not isinstance(method, Decorator):
if mx.is_super:
validate_super_call(method, mx)
if method.is_property:
assert isinstance(method, OverloadedFuncDef)
first_item = method.items[0]
assert isinstance(first_item, Decorator)
return analyze_var(name, first_item.var, typ, info, mx)
if mx.is_lvalue:
mx.msg.cant_assign_to_method(mx.context)
if not isinstance(method, OverloadedFuncDef):
signature = function_type(method, mx.named_type("builtins.function"))
else:
if method.type is None:
# Overloads may be not ready if they are decorated. Handle this in same
# manner as we would handle a regular decorated function: defer if possible.
if not mx.no_deferral and method.items:
mx.not_ready_callback(method.name, mx.context)
return AnyType(TypeOfAny.special_form)
assert isinstance(method.type, Overloaded)
signature = method.type
signature = freshen_all_functions_type_vars(signature)
if not method.is_static:
# TODO: use proper treatment of special methods on unions instead
# of this hack here and below (i.e. mx.self_type).
dispatched_type = meet.meet_types(mx.original_type, typ)
signature = check_self_arg(
signature, dispatched_type, method.is_class, mx.context, name, mx.msg
)
signature = bind_self(signature, mx.self_type, is_classmethod=method.is_class)
# TODO: should we skip these steps for static methods as well?
# Since generic static methods should not be allowed.
typ = map_instance_to_supertype(typ, method.info)
member_type = expand_type_by_instance(signature, typ)
freeze_all_type_vars(member_type)
return member_type
else:
# Not a method.
return analyze_member_var_access(name, typ, info, mx)
def validate_super_call(node: FuncBase, mx: MemberContext) -> None:
unsafe_super = False
if isinstance(node, FuncDef) and node.is_trivial_body:
unsafe_super = True
impl = node
elif isinstance(node, OverloadedFuncDef):
if node.impl:
impl = node.impl if isinstance(node.impl, FuncDef) else node.impl.func
unsafe_super = impl.is_trivial_body
if unsafe_super:
mx.msg.unsafe_super(node.name, node.info.name, mx.context)
def analyze_type_callable_member_access(name: str, typ: FunctionLike, mx: MemberContext) -> Type:
# Class attribute.
# TODO super?
ret_type = typ.items[0].ret_type
assert isinstance(ret_type, ProperType)
if isinstance(ret_type, TupleType):
ret_type = tuple_fallback(ret_type)
if isinstance(ret_type, TypedDictType):
ret_type = ret_type.fallback
if isinstance(ret_type, Instance):
if not mx.is_operator:
# When Python sees an operator (eg `3 == 4`), it automatically translates that
# into something like `int.__eq__(3, 4)` instead of `(3).__eq__(4)` as an
# optimization.
#
# While it normally doesn't matter which of the two versions is used, it
# does cause inconsistencies when working with classes. For example, translating
# `int == int` to `int.__eq__(int)` would not work since `int.__eq__` is meant to
# compare two int _instances_. What we really want is `type(int).__eq__`, which
# is meant to compare two types or classes.
#
# This check makes sure that when we encounter an operator, we skip looking up
# the corresponding method in the current instance to avoid this edge case.
# See https://github.com/python/mypy/pull/1787 for more info.
# TODO: do not rely on same type variables being present in all constructor overloads.
result = analyze_class_attribute_access(
ret_type, name, mx, original_vars=typ.items[0].variables, mcs_fallback=typ.fallback
)
if result:
return result
# Look up from the 'type' type.
return _analyze_member_access(name, typ.fallback, mx)
else:
assert False, f"Unexpected type {ret_type!r}"
def analyze_type_type_member_access(
name: str, typ: TypeType, mx: MemberContext, override_info: TypeInfo | None
) -> Type:
# Similar to analyze_type_callable_attribute_access.
item = None
fallback = mx.named_type("builtins.type")
if isinstance(typ.item, Instance):
item = typ.item
elif isinstance(typ.item, AnyType):
with mx.msg.filter_errors():
return _analyze_member_access(name, fallback, mx, override_info)
elif isinstance(typ.item, TypeVarType):
upper_bound = get_proper_type(typ.item.upper_bound)
if isinstance(upper_bound, Instance):
item = upper_bound
elif isinstance(upper_bound, UnionType):
return _analyze_member_access(
name,
TypeType.make_normalized(upper_bound, line=typ.line, column=typ.column),
mx,
override_info,
)
elif isinstance(upper_bound, TupleType):
item = tuple_fallback(upper_bound)
elif isinstance(upper_bound, AnyType):
with mx.msg.filter_errors():
return _analyze_member_access(name, fallback, mx, override_info)
elif isinstance(typ.item, TupleType):
item = tuple_fallback(typ.item)
elif isinstance(typ.item, FunctionLike) and typ.item.is_type_obj():
item = typ.item.fallback
elif isinstance(typ.item, TypeType):
# Access member on metaclass object via Type[Type[C]]
if isinstance(typ.item.item, Instance):
item = typ.item.item.type.metaclass_type
ignore_messages = False
if item is not None:
fallback = item.type.metaclass_type or fallback
if item and not mx.is_operator:
# See comment above for why operators are skipped
result = analyze_class_attribute_access(
item, name, mx, mcs_fallback=fallback, override_info=override_info
)
if result:
if not (isinstance(get_proper_type(result), AnyType) and item.type.fallback_to_any):
return result
else:
# We don't want errors on metaclass lookup for classes with Any fallback
ignore_messages = True
with mx.msg.filter_errors(filter_errors=ignore_messages):
return _analyze_member_access(name, fallback, mx, override_info)
def analyze_union_member_access(name: str, typ: UnionType, mx: MemberContext) -> Type:
with mx.msg.disable_type_names():
results = []
for subtype in typ.relevant_items():
# Self types should be bound to every individual item of a union.
item_mx = mx.copy_modified(self_type=subtype)
results.append(_analyze_member_access(name, subtype, item_mx))
return make_simplified_union(results)
def analyze_none_member_access(name: str, typ: NoneType, mx: MemberContext) -> Type:
if name == "__bool__":
literal_false = LiteralType(False, fallback=mx.named_type("builtins.bool"))
return CallableType(
arg_types=[],
arg_kinds=[],
arg_names=[],
ret_type=literal_false,
fallback=mx.named_type("builtins.function"),
)
else:
return _analyze_member_access(name, mx.named_type("builtins.object"), mx)
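# Example of the special-casing above (hypothetical user code):
#
#     x: None
#     x.__bool__()   # () -> Literal[False]
#     x.__class__    # any other attribute is looked up on builtins.object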
def analyze_member_var_access(
name: str, itype: Instance, info: TypeInfo, mx: MemberContext
) -> Type:
"""Analyse attribute access that does not target a method.
This is logically part of analyze_member_access and the arguments are similar.
original_type is the type of E in the expression E.var
"""
# It was not a method. Try looking up a variable.
v = lookup_member_var_or_accessor(info, name, mx.is_lvalue)
vv = v
if isinstance(vv, Decorator):
# The associated Var node of a decorator contains the type.
v = vv.var
if mx.is_super:
validate_super_call(vv.func, mx)
if isinstance(vv, TypeInfo):
# If the associated variable is a TypeInfo synthesize a Var node for
# the purposes of type checking. This enables us to type check things
# like accessing class attributes on an inner class.
v = Var(name, type=type_object_type(vv, mx.named_type))
v.info = info
if isinstance(vv, TypeAlias):
# Similar to the above TypeInfo case, we allow using
# qualified type aliases in runtime context if it refers to an
# instance type. For example:
# class C:
# A = List[int]
# x = C.A() <- this is OK
typ = mx.chk.expr_checker.alias_type_in_runtime_context(
vv, ctx=mx.context, alias_definition=mx.is_lvalue
)
v = Var(name, type=typ)
v.info = info
if isinstance(v, Var):
implicit = info[name].implicit
# An assignment to final attribute is always an error,
# independently of types.
if mx.is_lvalue and not mx.chk.get_final_context():
check_final_member(name, info, mx.msg, mx.context)
return analyze_var(name, v, itype, info, mx, implicit=implicit)
elif isinstance(v, FuncDef):
assert False, "Did not expect a function"
elif isinstance(v, MypyFile):
mx.chk.module_refs.add(v.fullname)
return mx.chk.expr_checker.module_type(v)
elif (
not v
and name not in ["__getattr__", "__setattr__", "__getattribute__"]
and not mx.is_operator
and mx.module_symbol_table is None
):
# Above we skip ModuleType.__getattr__ etc. if we have a
# module symbol table, since the symbol table allows precise
# checking.
if not mx.is_lvalue:
for method_name in ("__getattribute__", "__getattr__"):
method = info.get_method(method_name)
# __getattribute__ is defined on builtins.object and returns Any, so without
# the guard this search will always find object.__getattribute__ and conclude
# that the attribute exists
if method and method.info.fullname != "builtins.object":
bound_method = analyze_decorator_or_funcbase_access(
defn=method,
itype=itype,
info=info,
self_type=mx.self_type,
name=method_name,
mx=mx,
)
typ = map_instance_to_supertype(itype, method.info)
getattr_type = get_proper_type(expand_type_by_instance(bound_method, typ))
if isinstance(getattr_type, CallableType):
result = getattr_type.ret_type
else:
result = getattr_type
# Call the attribute hook before returning.
fullname = f"{method.info.fullname}.{name}"
hook = mx.chk.plugin.get_attribute_hook(fullname)
if hook:
result = hook(
AttributeContext(
get_proper_type(mx.original_type), result, mx.context, mx.chk
)
)
return result
else:
setattr_meth = info.get_method("__setattr__")
if setattr_meth and setattr_meth.info.fullname != "builtins.object":
bound_type = analyze_decorator_or_funcbase_access(
defn=setattr_meth,
itype=itype,
info=info,
self_type=mx.self_type,
name=name,
mx=mx.copy_modified(is_lvalue=False),
)
typ = map_instance_to_supertype(itype, setattr_meth.info)
setattr_type = get_proper_type(expand_type_by_instance(bound_type, typ))
if isinstance(setattr_type, CallableType) and len(setattr_type.arg_types) > 0:
return setattr_type.arg_types[-1]
if itype.type.fallback_to_any:
return AnyType(TypeOfAny.special_form)
# Could not find the member.
if itype.extra_attrs and name in itype.extra_attrs.attrs:
# For modules use direct symbol table lookup.
if not itype.extra_attrs.mod_name:
return itype.extra_attrs.attrs[name]
if mx.is_super:
mx.msg.undefined_in_superclass(name, mx.context)
return AnyType(TypeOfAny.from_error)
else:
ret = report_missing_attribute(mx.original_type, itype, name, mx)
# Avoid paying double jeopardy if we can't find the member due to --no-implicit-reexport
if (
mx.module_symbol_table is not None
and name in mx.module_symbol_table
and not mx.module_symbol_table[name].module_public
):
v = mx.module_symbol_table[name].node
e = NameExpr(name)
e.set_line(mx.context)
e.node = v
return mx.chk.expr_checker.analyze_ref_expr(e, lvalue=mx.is_lvalue)
return ret
def check_final_member(name: str, info: TypeInfo, msg: MessageBuilder, ctx: Context) -> None:
"""Give an error if the name being assigned was declared as final."""
for base in info.mro:
sym = base.names.get(name)
if sym and is_final_node(sym.node):
msg.cant_assign_to_final(name, attr_assign=True, ctx=ctx)
def analyze_descriptor_access(descriptor_type: Type, mx: MemberContext) -> Type:
"""Type check descriptor access.
Arguments:
descriptor_type: The type of the descriptor attribute being accessed
(the type of ``f`` in ``a.f`` when ``f`` is a descriptor).
mx: The current member access context.
Return:
The return type of the appropriate ``__get__`` overload for the descriptor.
"""
instance_type = get_proper_type(mx.self_type)
orig_descriptor_type = descriptor_type
descriptor_type = get_proper_type(descriptor_type)
if isinstance(descriptor_type, UnionType):
# Map the access over union types
return make_simplified_union(
[analyze_descriptor_access(typ, mx) for typ in descriptor_type.items]
)
elif not isinstance(descriptor_type, Instance):
return orig_descriptor_type
if not descriptor_type.type.has_readable_member("__get__"):
return orig_descriptor_type
dunder_get = descriptor_type.type.get_method("__get__")
if dunder_get is None:
mx.msg.fail(
message_registry.DESCRIPTOR_GET_NOT_CALLABLE.format(
descriptor_type.str_with_options(mx.msg.options)
),
mx.context,
)
return AnyType(TypeOfAny.from_error)
bound_method = analyze_decorator_or_funcbase_access(
defn=dunder_get,
itype=descriptor_type,
info=descriptor_type.type,
self_type=descriptor_type,
name="__get__",
mx=mx,
)
typ = map_instance_to_supertype(descriptor_type, dunder_get.info)
dunder_get_type = expand_type_by_instance(bound_method, typ)
if isinstance(instance_type, FunctionLike) and instance_type.is_type_obj():
owner_type = instance_type.items[0].ret_type
instance_type = NoneType()
elif isinstance(instance_type, TypeType):
owner_type = instance_type.item
instance_type = NoneType()
else:
owner_type = instance_type
callable_name = mx.chk.expr_checker.method_fullname(descriptor_type, "__get__")
dunder_get_type = mx.chk.expr_checker.transform_callee_type(
callable_name,
dunder_get_type,
[
TempNode(instance_type, context=mx.context),
TempNode(TypeType.make_normalized(owner_type), context=mx.context),
],
[ARG_POS, ARG_POS],
mx.context,
object_type=descriptor_type,
)
_, inferred_dunder_get_type = mx.chk.expr_checker.check_call(
dunder_get_type,
[
TempNode(instance_type, context=mx.context),
TempNode(TypeType.make_normalized(owner_type), context=mx.context),
],
[ARG_POS, ARG_POS],
mx.context,
object_type=descriptor_type,
callable_name=callable_name,
)
inferred_dunder_get_type = get_proper_type(inferred_dunder_get_type)
if isinstance(inferred_dunder_get_type, AnyType):
# check_call failed, and will have reported an error
return inferred_dunder_get_type
if not isinstance(inferred_dunder_get_type, CallableType):
mx.msg.fail(
message_registry.DESCRIPTOR_GET_NOT_CALLABLE.format(
descriptor_type.str_with_options(mx.msg.options)
),
mx.context,
)
return AnyType(TypeOfAny.from_error)
return inferred_dunder_get_type.ret_type
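# Illustrative sketch (hypothetical user code, not part of mypy) of the descriptor
# access handled above: for
#
#     class Descr:
#         def __get__(self, instance: object, owner: type) -> int: ...
#
#     class C:
#         attr = Descr()
#
# accessing `C().attr` type checks Descr.__get__ against (instance, owner) arguments
# and yields its return type (int here); for `C.attr` the instance argument becomes
# None while the owner stays type[C].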
def is_instance_var(var: Var) -> bool:
"""Return if var is an instance variable according to PEP 526."""
return (
# check the type_info node is the var (not a decorated function, etc.)
var.name in var.info.names
and var.info.names[var.name].node is var
and not var.is_classvar
# variables without annotations are treated as classvar
and not var.is_inferred
)
def analyze_var(
name: str,
var: Var,
itype: Instance,
info: TypeInfo,
mx: MemberContext,
*,
implicit: bool = False,
) -> Type:
"""Analyze access to an attribute via a Var node.
This is conceptually part of analyze_member_access and the arguments are similar.
itype is the instance type in which attribute should be looked up
original_type is the type of E in the expression E.var
if implicit is True, the original Var was created as an assignment to self
"""
# Found a member variable.
original_itype = itype
itype = map_instance_to_supertype(itype, var.info)
typ = var.type
if typ:
if isinstance(typ, PartialType):
return mx.chk.handle_partial_var_type(typ, mx.is_lvalue, var, mx.context)
if mx.is_lvalue and var.is_property and not var.is_settable_property:
# TODO allow setting attributes in subclass (although it is probably an error)
mx.msg.read_only_property(name, itype.type, mx.context)
if mx.is_lvalue and var.is_classvar:
mx.msg.cant_assign_to_classvar(name, mx.context)
t = freshen_all_functions_type_vars(typ)
t = expand_self_type_if_needed(t, mx, var, original_itype)
t = expand_type_by_instance(t, itype)
freeze_all_type_vars(t)
result = t
typ = get_proper_type(typ)
call_type: ProperType | None = None
if var.is_initialized_in_class and (not is_instance_var(var) or mx.is_operator):
if isinstance(typ, FunctionLike) and not typ.is_type_obj():
call_type = typ
elif var.is_property:
call_type = get_proper_type(_analyze_member_access("__call__", typ, mx))
else:
call_type = typ
if isinstance(call_type, FunctionLike) and not call_type.is_type_obj():
if mx.is_lvalue:
if var.is_property:
if not var.is_settable_property:
mx.msg.read_only_property(name, itype.type, mx.context)
else:
mx.msg.cant_assign_to_method(mx.context)
if not var.is_staticmethod:
# Class-level function objects and classmethods become bound methods:
# the former to the instance, the latter to the class.
functype: FunctionLike = call_type
# Use meet to narrow original_type to the dispatched type.
# For example, assume
# * A.f: Callable[[A1], None] where A1 <: A (maybe A1 == A)
# * B.f: Callable[[B1], None] where B1 <: B (maybe B1 == B)
# * x: Union[A1, B1]
# In `x.f`, when checking `x` against A1 we assume x is compatible with A
# and similarly for B1 when checking against B
dispatched_type = meet.meet_types(mx.original_type, itype)
signature = freshen_all_functions_type_vars(functype)
bound = get_proper_type(expand_self_type(var, signature, mx.original_type))
assert isinstance(bound, FunctionLike)
signature = bound
signature = check_self_arg(
signature, dispatched_type, var.is_classmethod, mx.context, name, mx.msg
)
signature = bind_self(signature, mx.self_type, var.is_classmethod)
expanded_signature = expand_type_by_instance(signature, itype)
freeze_all_type_vars(expanded_signature)
if var.is_property:
# A property cannot have an overloaded type => the cast is fine.
assert isinstance(expanded_signature, CallableType)
result = expanded_signature.ret_type
else:
result = expanded_signature
else:
if not var.is_ready and not mx.no_deferral:
mx.not_ready_callback(var.name, mx.context)
# Implicit 'Any' type.
result = AnyType(TypeOfAny.special_form)
fullname = f"{var.info.fullname}.{name}"
hook = mx.chk.plugin.get_attribute_hook(fullname)
if result and not mx.is_lvalue and not implicit:
result = analyze_descriptor_access(result, mx)
if hook:
result = hook(
AttributeContext(get_proper_type(mx.original_type), result, mx.context, mx.chk)
)
return result
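# Illustrative sketch (hypothetical user code, not part of mypy) of what analyze_var
# does for a plain variable member:
#
#     class C(Generic[T]):
#         x: T
#
#     def g(c: C[int]) -> None:
#         c.x  # the declared type T is expanded for the instance: int
#
# Function-typed class variables additionally get bound to the instance (see the
# FunctionLike branch above), while properties use their getter's return type.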
def expand_self_type_if_needed(
t: Type, mx: MemberContext, var: Var, itype: Instance, is_class: bool = False
) -> Type:
"""Expand special Self type in a backwards compatible manner.
This should ensure that mixing old-style and new-style self-types work
seamlessly. Also, re-bind new style self-types in subclasses if needed.
"""
original = get_proper_type(mx.self_type)
if not (mx.is_self or mx.is_super):
repl = mx.self_type
if is_class:
if isinstance(original, TypeType):
repl = original.item
elif isinstance(original, CallableType):
# Problematic access errors should have been already reported.
repl = erase_typevars(original.ret_type)
else:
repl = itype
return expand_self_type(var, t, repl)
elif supported_self_type(
# Support compatibility with plain old style T -> T and Type[T] -> T only.
get_proper_type(mx.self_type),
allow_instances=False,
allow_callable=False,
):
repl = mx.self_type
if is_class and isinstance(original, TypeType):
repl = original.item
return expand_self_type(var, t, repl)
elif (
mx.is_self
and itype.type != var.info
# If an attribute with Self-type was defined in a supertype, we need to
# rebind the Self type variable to Self type variable of current class...
and itype.type.self_type is not None
# ...unless `self` has an explicit non-trivial annotation.
and itype == mx.chk.scope.active_self_type()
):
return expand_self_type(var, t, itype.type.self_type)
else:
return t
def freeze_all_type_vars(member_type: Type) -> None:
member_type.accept(FreezeTypeVarsVisitor())
class FreezeTypeVarsVisitor(TypeTraverserVisitor):
def visit_callable_type(self, t: CallableType) -> None:
for v in t.variables:
v.id.meta_level = 0
super().visit_callable_type(t)
def lookup_member_var_or_accessor(info: TypeInfo, name: str, is_lvalue: bool) -> SymbolNode | None:
"""Find the attribute/accessor node that refers to a member of a type."""
# TODO handle lvalues
node = info.get(name)
if node:
return node.node
else:
return None
def check_self_arg(
functype: FunctionLike,
dispatched_arg_type: Type,
is_classmethod: bool,
context: Context,
name: str,
msg: MessageBuilder,
) -> FunctionLike:
"""Check that an instance has a valid type for a method with annotated 'self'.
For example if the method is defined as:
class A:
def f(self: S) -> T: ...
then for 'x.f' we check that meet(type(x), A) <: S. If the method is overloaded, we
    select only overload items that satisfy this requirement. If there are no matching
overloads, an error is generated.
    Note: dispatched_arg_type uses a meet to select a relevant item in case the
    original type of 'x' is a union. This is done because several special methods
    treat union types in an ad-hoc manner, so we can't use MemberContext.self_type yet.
"""
items = functype.items
if not items:
return functype
new_items = []
if is_classmethod:
dispatched_arg_type = TypeType.make_normalized(dispatched_arg_type)
for item in items:
if not item.arg_types or item.arg_kinds[0] not in (ARG_POS, ARG_STAR):
# No positional first (self) argument (*args is okay).
msg.no_formal_self(name, item, context)
# This is pretty bad, so just return the original signature if
# there is at least one such error.
return functype
else:
selfarg = get_proper_type(item.arg_types[0])
# This level of erasure matches the one in checker.check_func_def(),
# better keep these two checks consistent.
if subtypes.is_subtype(dispatched_arg_type, erase_typevars(erase_to_bound(selfarg))):
new_items.append(item)
elif isinstance(selfarg, ParamSpecType):
# TODO: This is not always right. What's the most reasonable thing to do here?
new_items.append(item)
elif isinstance(selfarg, TypeVarTupleType):
raise NotImplementedError
if not new_items:
# Choose first item for the message (it may be not very helpful for overloads).
msg.incompatible_self_argument(
name, dispatched_arg_type, items[0], is_classmethod, context
)
return functype
if len(new_items) == 1:
return new_items[0]
return Overloaded(new_items)
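# Illustrative sketch (hypothetical user code, not part of mypy) of the check above:
#
#     S = TypeVar("S", bound="A")
#
#     class A:
#         def f(self: S) -> S: ...
#
#     class B(A): ...
#
#     B().f()  # ok: the dispatched type B is a subtype of the (erased) bound of S
#
# For overloaded methods only the overload items whose annotated self argument
# accepts the dispatched type are kept; if none match, an error is reported.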
def analyze_class_attribute_access(
itype: Instance,
name: str,
mx: MemberContext,
*,
mcs_fallback: Instance,
override_info: TypeInfo | None = None,
original_vars: Sequence[TypeVarLikeType] | None = None,
) -> Type | None:
"""Analyze access to an attribute on a class object.
itype is the return type of the class object callable, original_type is the type
of E in the expression E.var, original_vars are type variables of the class callable
(for generic classes).
"""
info = itype.type
if override_info:
info = override_info
fullname = f"{info.fullname}.{name}"
hook = mx.chk.plugin.get_class_attribute_hook(fullname)
node = info.get(name)
if not node:
if itype.extra_attrs and name in itype.extra_attrs.attrs:
# For modules use direct symbol table lookup.
if not itype.extra_attrs.mod_name:
return itype.extra_attrs.attrs[name]
if info.fallback_to_any or info.meta_fallback_to_any:
return apply_class_attr_hook(mx, hook, AnyType(TypeOfAny.special_form))
return None
if (
isinstance(node.node, Var)
and not node.node.is_classvar
and not hook
and mcs_fallback.type.get(name)
):
# If the same attribute is declared on the metaclass and the class but with different types,
# and the attribute on the class is not a ClassVar,
# the type of the attribute on the metaclass should take priority
# over the type of the attribute on the class,
# when the attribute is being accessed from the class object itself.
#
# Return `None` here to signify that the name should be looked up
# on the class object itself rather than the instance.
return None
is_decorated = isinstance(node.node, Decorator)
is_method = is_decorated or isinstance(node.node, FuncBase)
if mx.is_lvalue:
if is_method:
mx.msg.cant_assign_to_method(mx.context)
if isinstance(node.node, TypeInfo):
mx.msg.fail(message_registry.CANNOT_ASSIGN_TO_TYPE, mx.context)
# Refuse class attribute access if slot defined
if info.slots and name in info.slots:
mx.msg.fail(message_registry.CLASS_VAR_CONFLICTS_SLOTS.format(name), mx.context)
# If a final attribute was declared on `self` in `__init__`, then it
# can't be accessed on the class object.
if node.implicit and isinstance(node.node, Var) and node.node.is_final:
mx.msg.fail(
message_registry.CANNOT_ACCESS_FINAL_INSTANCE_ATTR.format(node.node.name), mx.context
)
# An assignment to final attribute on class object is also always an error,
# independently of types.
if mx.is_lvalue and not mx.chk.get_final_context():
check_final_member(name, info, mx.msg, mx.context)
if info.is_enum and not (mx.is_lvalue or is_decorated or is_method):
enum_class_attribute_type = analyze_enum_class_attribute_access(itype, name, mx)
if enum_class_attribute_type:
return apply_class_attr_hook(mx, hook, enum_class_attribute_type)
t = node.type
if t:
if isinstance(t, PartialType):
symnode = node.node
assert isinstance(symnode, Var)
return apply_class_attr_hook(
mx, hook, mx.chk.handle_partial_var_type(t, mx.is_lvalue, symnode, mx.context)
)
# Find the class where method/variable was defined.
if isinstance(node.node, Decorator):
super_info: TypeInfo | None = node.node.var.info
elif isinstance(node.node, (Var, SYMBOL_FUNCBASE_TYPES)):
super_info = node.node.info
else:
super_info = None
# Map the type to how it would look as a defining class. For example:
# class C(Generic[T]): ...
# class D(C[Tuple[T, S]]): ...
# D[int, str].method()
# Here itype is D[int, str], isuper is C[Tuple[int, str]].
if not super_info:
isuper = None
else:
isuper = map_instance_to_supertype(itype, super_info)
if isinstance(node.node, Var):
assert isuper is not None
# Check if original variable type has type variables. For example:
# class C(Generic[T]):
# x: T
# C.x # Error, ambiguous access
# C[int].x # Also an error, since C[int] is same as C at runtime
# Exception is Self type wrapped in ClassVar, that is safe.
def_vars = set(node.node.info.defn.type_vars)
if not node.node.is_classvar and node.node.info.self_type:
def_vars.add(node.node.info.self_type)
typ_vars = set(get_type_vars(t))
if def_vars & typ_vars:
# Exception: access on Type[...], including first argument of class methods is OK.
if not isinstance(get_proper_type(mx.original_type), TypeType) or node.implicit:
if node.node.is_classvar:
message = message_registry.GENERIC_CLASS_VAR_ACCESS
else:
message = message_registry.GENERIC_INSTANCE_VAR_CLASS_ACCESS
mx.msg.fail(message, mx.context)
t = expand_self_type_if_needed(t, mx, node.node, itype, is_class=True)
# Erase non-mapped variables, but keep mapped ones, even if there is an error.
# In the above example this means that we infer following types:
# C.x -> Any
# C[int].x -> int
t = erase_typevars(expand_type_by_instance(t, isuper), {tv.id for tv in def_vars})
is_classmethod = (is_decorated and cast(Decorator, node.node).func.is_class) or (
isinstance(node.node, FuncBase) and node.node.is_class
)
is_staticmethod = (is_decorated and cast(Decorator, node.node).func.is_static) or (
isinstance(node.node, FuncBase) and node.node.is_static
)
t = get_proper_type(t)
if isinstance(t, FunctionLike) and is_classmethod:
t = check_self_arg(t, mx.self_type, False, mx.context, name, mx.msg)
result = add_class_tvars(
t, isuper, is_classmethod, is_staticmethod, mx.self_type, original_vars=original_vars
)
if not mx.is_lvalue:
result = analyze_descriptor_access(result, mx)
return apply_class_attr_hook(mx, hook, result)
elif isinstance(node.node, Var):
mx.not_ready_callback(name, mx.context)
return AnyType(TypeOfAny.special_form)
if isinstance(node.node, TypeVarExpr):
mx.msg.fail(
message_registry.CANNOT_USE_TYPEVAR_AS_EXPRESSION.format(info.name, name), mx.context
)
return AnyType(TypeOfAny.from_error)
if isinstance(node.node, TypeInfo):
return type_object_type(node.node, mx.named_type)
if isinstance(node.node, MypyFile):
# Reference to a module object.
return mx.named_type("types.ModuleType")
if isinstance(node.node, TypeAlias):
return mx.chk.expr_checker.alias_type_in_runtime_context(
node.node, ctx=mx.context, alias_definition=mx.is_lvalue
)
if is_decorated:
assert isinstance(node.node, Decorator)
if node.node.type:
return apply_class_attr_hook(mx, hook, node.node.type)
else:
mx.not_ready_callback(name, mx.context)
return AnyType(TypeOfAny.from_error)
else:
assert isinstance(node.node, FuncBase)
typ = function_type(node.node, mx.named_type("builtins.function"))
# Note: if we are accessing class method on class object, the cls argument is bound.
# Annotated and/or explicit class methods go through other code paths above, for
# unannotated implicit class methods we do this here.
if node.node.is_class:
typ = bind_self(typ, is_classmethod=True)
return apply_class_attr_hook(mx, hook, typ)
def apply_class_attr_hook(
mx: MemberContext, hook: Callable[[AttributeContext], Type] | None, result: Type
) -> Type | None:
if hook:
result = hook(
AttributeContext(get_proper_type(mx.original_type), result, mx.context, mx.chk)
)
return result
def analyze_enum_class_attribute_access(
itype: Instance, name: str, mx: MemberContext
) -> Type | None:
    # Skip these since Enum will remove them
if name in ENUM_REMOVED_PROPS:
return report_missing_attribute(mx.original_type, itype, name, mx)
# Dunders and private names are not Enum members
if name.startswith("__") and name.replace("_", "") != "":
return None
node = itype.type.get(name)
if node and node.type:
proper = get_proper_type(node.type)
# Support `A = nonmember(1)` function call and decorator.
if (
isinstance(proper, Instance)
and proper.type.fullname == "enum.nonmember"
and proper.args
):
return proper.args[0]
enum_literal = LiteralType(name, fallback=itype)
return itype.copy_modified(last_known_value=enum_literal)
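# Illustrative sketch (hypothetical user code, not part of mypy):
#
#     class Color(enum.Enum):
#         RED = 1
#         ALIAS = enum.nonmember(2)
#
#     Color.RED    # narrowed to the enum literal, shown as Literal[Color.RED]
#     Color.ALIAS  # the nonmember wrapper is unwrapped to its type argument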
def analyze_typeddict_access(
name: str, typ: TypedDictType, mx: MemberContext, override_info: TypeInfo | None
) -> Type:
if name == "__setitem__":
if isinstance(mx.context, IndexExpr):
# Since we can get this during `a['key'] = ...`
# it is safe to assume that the context is `IndexExpr`.
item_type, key_names = mx.chk.expr_checker.visit_typeddict_index_expr(
typ, mx.context.index, setitem=True
)
assigned_readonly_keys = typ.readonly_keys & key_names
if assigned_readonly_keys:
mx.msg.readonly_keys_mutated(assigned_readonly_keys, context=mx.context)
else:
# It can also be `a.__setitem__(...)` direct call.
# In this case `item_type` can be `Any`,
# because we don't have args available yet.
# TODO: check in `default` plugin that `__setitem__` is correct.
item_type = AnyType(TypeOfAny.implementation_artifact)
return CallableType(
arg_types=[mx.chk.named_type("builtins.str"), item_type],
arg_kinds=[ARG_POS, ARG_POS],
arg_names=[None, None],
ret_type=NoneType(),
fallback=mx.chk.named_type("builtins.function"),
name=name,
)
elif name == "__delitem__":
return CallableType(
arg_types=[mx.chk.named_type("builtins.str")],
arg_kinds=[ARG_POS],
arg_names=[None],
ret_type=NoneType(),
fallback=mx.chk.named_type("builtins.function"),
name=name,
)
return _analyze_member_access(name, typ.fallback, mx, override_info)
def add_class_tvars(
t: ProperType,
isuper: Instance | None,
is_classmethod: bool,
is_staticmethod: bool,
original_type: Type,
original_vars: Sequence[TypeVarLikeType] | None = None,
) -> Type:
"""Instantiate type variables during analyze_class_attribute_access,
    e.g. T and Q in the following:
class A(Generic[T]):
@classmethod
def foo(cls: Type[Q]) -> Tuple[T, Q]: ...
class B(A[str]): pass
B.foo()
Args:
t: Declared type of the method (or property)
isuper: Current instance mapped to the superclass where method was defined, this
is usually done by map_instance_to_supertype()
is_classmethod: True if this method is decorated with @classmethod
is_staticmethod: True if this method is decorated with @staticmethod
original_type: The value of the type B in the expression B.foo() or the corresponding
component in case of a union (this is used to bind the self-types)
original_vars: Type variables of the class callable on which the method was accessed
Returns:
Expanded method type with added type variables (when needed).
"""
# TODO: verify consistency between Q and T
# We add class type variables if the class method is accessed on class object
# without applied type arguments, this matches the behavior of __init__().
# For example (continuing the example in docstring):
# A # The type of callable is def [T] () -> A[T], _not_ def () -> A[Any]
# A[int] # The type of callable is def () -> A[int]
# and
# A.foo # The type is generic def [T] () -> Tuple[T, A[T]]
# A[int].foo # The type is non-generic def () -> Tuple[int, A[int]]
#
# This behaviour is useful for defining alternative constructors for generic classes.
# To achieve such behaviour, we add the class type variables that are still free
# (i.e. appear in the return type of the class object on which the method was accessed).
if isinstance(t, CallableType):
tvars = original_vars if original_vars is not None else []
t = freshen_all_functions_type_vars(t)
if is_classmethod:
t = bind_self(t, original_type, is_classmethod=True)
if is_classmethod or is_staticmethod:
assert isuper is not None
t = expand_type_by_instance(t, isuper)
freeze_all_type_vars(t)
return t.copy_modified(variables=list(tvars) + list(t.variables))
elif isinstance(t, Overloaded):
return Overloaded(
[
cast(
CallableType,
add_class_tvars(
item,
isuper,
is_classmethod,
is_staticmethod,
original_type,
original_vars=original_vars,
),
)
for item in t.items
]
)
if isuper is not None:
t = expand_type_by_instance(t, isuper)
return t
def type_object_type(info: TypeInfo, named_type: Callable[[str], Instance]) -> ProperType:
"""Return the type of a type object.
    For a generic type G with type variables T and S, the type is generally of the form
Callable[..., G[T, S]]
where ... are argument types for the __init__/__new__ method (without the self
argument). Also, the fallback type will be 'type' instead of 'function'.
"""
# We take the type from whichever of __init__ and __new__ is first
# in the MRO, preferring __init__ if there is a tie.
init_method = info.get("__init__")
new_method = info.get("__new__")
if not init_method or not is_valid_constructor(init_method.node):
# Must be an invalid class definition.
return AnyType(TypeOfAny.from_error)
# There *should* always be a __new__ method except the test stubs
# lack it, so just copy init_method in that situation
new_method = new_method or init_method
if not is_valid_constructor(new_method.node):
# Must be an invalid class definition.
return AnyType(TypeOfAny.from_error)
# The two is_valid_constructor() checks ensure this.
assert isinstance(new_method.node, (SYMBOL_FUNCBASE_TYPES, Decorator))
assert isinstance(init_method.node, (SYMBOL_FUNCBASE_TYPES, Decorator))
init_index = info.mro.index(init_method.node.info)
new_index = info.mro.index(new_method.node.info)
fallback = info.metaclass_type or named_type("builtins.type")
if init_index < new_index:
method: FuncBase | Decorator = init_method.node
is_new = False
elif init_index > new_index:
method = new_method.node
is_new = True
else:
if init_method.node.info.fullname == "builtins.object":
# Both are defined by object. But if we've got a bogus
# base class, we can't know for sure, so check for that.
if info.fallback_to_any:
# Construct a universal callable as the prototype.
any_type = AnyType(TypeOfAny.special_form)
sig = CallableType(
arg_types=[any_type, any_type],
arg_kinds=[ARG_STAR, ARG_STAR2],
arg_names=["_args", "_kwds"],
ret_type=any_type,
fallback=named_type("builtins.function"),
)
return class_callable(sig, info, fallback, None, is_new=False)
# Otherwise prefer __init__ in a tie. It isn't clear that this
# is the right thing, but __new__ caused problems with
# typeshed (#5647).
method = init_method.node
is_new = False
# Construct callable type based on signature of __init__. Adjust
# return type and insert type arguments.
if isinstance(method, FuncBase):
t = function_type(method, fallback)
else:
assert isinstance(method.type, ProperType)
assert isinstance(method.type, FunctionLike) # is_valid_constructor() ensures this
t = method.type
return type_object_type_from_function(t, info, method.info, fallback, is_new)
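# Illustrative sketch (hypothetical user code, not part of mypy):
#
#     class C(Generic[T]):
#         def __init__(self, x: T) -> None: ...
#
# type_object_type(C, ...) is roughly `def [T] (x: T) -> C[T]`, built from whichever
# of __init__/__new__ is defined closest to C in the MRO (preferring __init__ on a
# tie), with `type` (or the metaclass type, if any) as the fallback.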
def analyze_decorator_or_funcbase_access(
defn: Decorator | FuncBase,
itype: Instance,
info: TypeInfo,
self_type: Type | None,
name: str,
mx: MemberContext,
) -> Type:
"""Analyzes the type behind method access.
The function itself can possibly be decorated.
See: https://github.com/python/mypy/issues/10409
"""
if isinstance(defn, Decorator):
return analyze_var(name, defn.var, itype, info, mx)
return bind_self(
function_type(defn, mx.chk.named_type("builtins.function")), original_type=self_type
)
def is_valid_constructor(n: SymbolNode | None) -> bool:
"""Does this node represents a valid constructor method?
This includes normal functions, overloaded functions, and decorators
that return a callable type.
"""
if isinstance(n, FuncBase):
return True
if isinstance(n, Decorator):
return isinstance(get_proper_type(n.type), FunctionLike)
return False
algorandfoundation/puya | src/puyapy/_vendor/mypy/checkmember.py | Python | NOASSERTION | 55,260
"""Pattern checker. This file is conceptually part of TypeChecker."""
from __future__ import annotations
from collections import defaultdict
from typing import Final, NamedTuple
import mypy.checker
from mypy import message_registry
from mypy.checkmember import analyze_member_access
from mypy.expandtype import expand_type_by_instance
from mypy.join import join_types
from mypy.literals import literal_hash
from mypy.maptype import map_instance_to_supertype
from mypy.meet import narrow_declared_type
from mypy.messages import MessageBuilder
from mypy.nodes import ARG_POS, Context, Expression, NameExpr, TypeAlias, TypeInfo, Var
from mypy.options import Options
from mypy.patterns import (
AsPattern,
ClassPattern,
MappingPattern,
OrPattern,
Pattern,
SequencePattern,
SingletonPattern,
StarredPattern,
ValuePattern,
)
from mypy.plugin import Plugin
from mypy.subtypes import is_subtype
from mypy.typeops import (
coerce_to_literal,
make_simplified_union,
try_getting_str_literals_from_type,
tuple_fallback,
)
from mypy.types import (
AnyType,
Instance,
LiteralType,
NoneType,
ProperType,
TupleType,
Type,
TypedDictType,
TypeOfAny,
TypeVarTupleType,
UninhabitedType,
UnionType,
UnpackType,
find_unpack_in_list,
get_proper_type,
split_with_prefix_and_suffix,
)
from mypy.typevars import fill_typevars
from mypy.visitor import PatternVisitor
self_match_type_names: Final = [
"builtins.bool",
"builtins.bytearray",
"builtins.bytes",
"builtins.dict",
"builtins.float",
"builtins.frozenset",
"builtins.int",
"builtins.list",
"builtins.set",
"builtins.str",
"builtins.tuple",
]
non_sequence_match_type_names: Final = ["builtins.str", "builtins.bytes", "builtins.bytearray"]
# For every Pattern a PatternType can be calculated. This requires recursively calculating
# the PatternTypes of the sub-patterns first.
# Using the data in the PatternType the match subject and captured names can be narrowed/inferred.
class PatternType(NamedTuple):
type: Type # The type the match subject can be narrowed to
rest_type: Type # The remaining type if the pattern didn't match
captures: dict[Expression, Type] # The variables captured by the pattern
class PatternChecker(PatternVisitor[PatternType]):
"""Pattern checker.
This class checks if a pattern can match a type, what the type can be narrowed to, and what
type capture patterns should be inferred as.
"""
# Some services are provided by a TypeChecker instance.
chk: mypy.checker.TypeChecker
# This is shared with TypeChecker, but stored also here for convenience.
msg: MessageBuilder
# Currently unused
plugin: Plugin
# The expression being matched against the pattern
subject: Expression
subject_type: Type
# Type of the subject to check the (sub)pattern against
type_context: list[Type]
# Types that match against self instead of their __match_args__ if used as a class pattern
# Filled in from self_match_type_names
self_match_types: list[Type]
# Types that are sequences, but don't match sequence patterns. Filled in from
# non_sequence_match_type_names
non_sequence_match_types: list[Type]
options: Options
def __init__(
self, chk: mypy.checker.TypeChecker, msg: MessageBuilder, plugin: Plugin, options: Options
) -> None:
self.chk = chk
self.msg = msg
self.plugin = plugin
self.type_context = []
self.self_match_types = self.generate_types_from_names(self_match_type_names)
self.non_sequence_match_types = self.generate_types_from_names(
non_sequence_match_type_names
)
self.options = options
def accept(self, o: Pattern, type_context: Type) -> PatternType:
self.type_context.append(type_context)
result = o.accept(self)
self.type_context.pop()
return result
def visit_as_pattern(self, o: AsPattern) -> PatternType:
current_type = self.type_context[-1]
if o.pattern is not None:
pattern_type = self.accept(o.pattern, current_type)
typ, rest_type, type_map = pattern_type
else:
typ, rest_type, type_map = current_type, UninhabitedType(), {}
if not is_uninhabited(typ) and o.name is not None:
typ, _ = self.chk.conditional_types_with_intersection(
current_type, [get_type_range(typ)], o, default=current_type
)
if not is_uninhabited(typ):
type_map[o.name] = typ
return PatternType(typ, rest_type, type_map)
def visit_or_pattern(self, o: OrPattern) -> PatternType:
current_type = self.type_context[-1]
#
# Check all the subpatterns
#
pattern_types = []
for pattern in o.patterns:
pattern_type = self.accept(pattern, current_type)
pattern_types.append(pattern_type)
current_type = pattern_type.rest_type
#
# Collect the final type
#
types = []
for pattern_type in pattern_types:
if not is_uninhabited(pattern_type.type):
types.append(pattern_type.type)
#
# Check the capture types
#
capture_types: dict[Var, list[tuple[Expression, Type]]] = defaultdict(list)
# Collect captures from the first subpattern
for expr, typ in pattern_types[0].captures.items():
node = get_var(expr)
capture_types[node].append((expr, typ))
# Check if other subpatterns capture the same names
for i, pattern_type in enumerate(pattern_types[1:]):
vars = {get_var(expr) for expr, _ in pattern_type.captures.items()}
if capture_types.keys() != vars:
self.msg.fail(message_registry.OR_PATTERN_ALTERNATIVE_NAMES, o.patterns[i])
for expr, typ in pattern_type.captures.items():
node = get_var(expr)
capture_types[node].append((expr, typ))
captures: dict[Expression, Type] = {}
for capture_list in capture_types.values():
typ = UninhabitedType()
for _, other in capture_list:
typ = join_types(typ, other)
captures[capture_list[0][0]] = typ
union_type = make_simplified_union(types)
return PatternType(union_type, current_type, captures)
def visit_value_pattern(self, o: ValuePattern) -> PatternType:
current_type = self.type_context[-1]
typ = self.chk.expr_checker.accept(o.expr)
typ = coerce_to_literal(typ)
narrowed_type, rest_type = self.chk.conditional_types_with_intersection(
current_type, [get_type_range(typ)], o, default=get_proper_type(typ)
)
if not isinstance(get_proper_type(narrowed_type), (LiteralType, UninhabitedType)):
return PatternType(narrowed_type, UnionType.make_union([narrowed_type, rest_type]), {})
return PatternType(narrowed_type, rest_type, {})
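    # Illustrative sketch (hypothetical user code, not part of mypy):
    #
    #     match command:
    #         case "quit":   # ValuePattern: `command` is narrowed towards Literal["quit"]
    #             ...
    #
    # If the narrowed type is not a literal (or uninhabited), the non-matching branch
    # keeps the original type rather than being narrowed negatively.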
def visit_singleton_pattern(self, o: SingletonPattern) -> PatternType:
current_type = self.type_context[-1]
value: bool | None = o.value
if isinstance(value, bool):
typ = self.chk.expr_checker.infer_literal_expr_type(value, "builtins.bool")
elif value is None:
typ = NoneType()
else:
assert False
narrowed_type, rest_type = self.chk.conditional_types_with_intersection(
current_type, [get_type_range(typ)], o, default=current_type
)
return PatternType(narrowed_type, rest_type, {})
def visit_sequence_pattern(self, o: SequencePattern) -> PatternType:
#
# check for existence of a starred pattern
#
current_type = get_proper_type(self.type_context[-1])
if not self.can_match_sequence(current_type):
return self.early_non_match()
star_positions = [i for i, p in enumerate(o.patterns) if isinstance(p, StarredPattern)]
star_position: int | None = None
if len(star_positions) == 1:
star_position = star_positions[0]
elif len(star_positions) >= 2:
assert False, "Parser should prevent multiple starred patterns"
required_patterns = len(o.patterns)
if star_position is not None:
required_patterns -= 1
#
# get inner types of original type
#
unpack_index = None
if isinstance(current_type, TupleType):
inner_types = current_type.items
unpack_index = find_unpack_in_list(inner_types)
if unpack_index is None:
size_diff = len(inner_types) - required_patterns
if size_diff < 0:
return self.early_non_match()
elif size_diff > 0 and star_position is None:
return self.early_non_match()
else:
normalized_inner_types = []
for it in inner_types:
# Unfortunately, it is not possible to "split" the TypeVarTuple
# into individual items, so we just use its upper bound for the whole
# analysis instead.
if isinstance(it, UnpackType) and isinstance(it.type, TypeVarTupleType):
it = UnpackType(it.type.upper_bound)
normalized_inner_types.append(it)
inner_types = normalized_inner_types
current_type = current_type.copy_modified(items=normalized_inner_types)
if len(inner_types) - 1 > required_patterns and star_position is None:
return self.early_non_match()
else:
inner_type = self.get_sequence_type(current_type, o)
if inner_type is None:
inner_type = self.chk.named_type("builtins.object")
inner_types = [inner_type] * len(o.patterns)
#
# match inner patterns
#
contracted_new_inner_types: list[Type] = []
contracted_rest_inner_types: list[Type] = []
captures: dict[Expression, Type] = {}
contracted_inner_types = self.contract_starred_pattern_types(
inner_types, star_position, required_patterns
)
for p, t in zip(o.patterns, contracted_inner_types):
pattern_type = self.accept(p, t)
typ, rest, type_map = pattern_type
contracted_new_inner_types.append(typ)
contracted_rest_inner_types.append(rest)
self.update_type_map(captures, type_map)
new_inner_types = self.expand_starred_pattern_types(
contracted_new_inner_types, star_position, len(inner_types), unpack_index is not None
)
rest_inner_types = self.expand_starred_pattern_types(
contracted_rest_inner_types, star_position, len(inner_types), unpack_index is not None
)
#
# Calculate new type
#
new_type: Type
rest_type: Type = current_type
if isinstance(current_type, TupleType) and unpack_index is None:
narrowed_inner_types = []
inner_rest_types = []
for inner_type, new_inner_type in zip(inner_types, new_inner_types):
(narrowed_inner_type, inner_rest_type) = (
self.chk.conditional_types_with_intersection(
inner_type, [get_type_range(new_inner_type)], o, default=inner_type
)
)
narrowed_inner_types.append(narrowed_inner_type)
inner_rest_types.append(inner_rest_type)
if all(not is_uninhabited(typ) for typ in narrowed_inner_types):
new_type = TupleType(narrowed_inner_types, current_type.partial_fallback)
else:
new_type = UninhabitedType()
if all(is_uninhabited(typ) for typ in inner_rest_types):
# All subpatterns always match, so we can apply negative narrowing
rest_type = TupleType(rest_inner_types, current_type.partial_fallback)
elif sum(not is_uninhabited(typ) for typ in inner_rest_types) == 1:
# Exactly one subpattern may conditionally match, the rest always match.
# We can apply negative narrowing to this one position.
rest_type = TupleType(
[
curr if is_uninhabited(rest) else rest
for curr, rest in zip(inner_types, inner_rest_types)
],
current_type.partial_fallback,
)
elif isinstance(current_type, TupleType):
# For variadic tuples it is too tricky to match individual items like for fixed
# tuples, so we instead try to narrow the entire type.
# TODO: use more precise narrowing when possible (e.g. for identical shapes).
new_tuple_type = TupleType(new_inner_types, current_type.partial_fallback)
new_type, rest_type = self.chk.conditional_types_with_intersection(
new_tuple_type, [get_type_range(current_type)], o, default=new_tuple_type
)
else:
new_inner_type = UninhabitedType()
for typ in new_inner_types:
new_inner_type = join_types(new_inner_type, typ)
new_type = self.construct_sequence_child(current_type, new_inner_type)
if is_subtype(new_type, current_type):
new_type, _ = self.chk.conditional_types_with_intersection(
current_type, [get_type_range(new_type)], o, default=current_type
)
else:
new_type = current_type
return PatternType(new_type, rest_type, captures)
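    # Illustrative sketch (hypothetical user code, not part of mypy):
    #
    #     def f(x: tuple[int, str, bytes]) -> None:
    #         match x:
    #             case (a, *rest):
    #                 reveal_type(a)     # int
    #                 reveal_type(rest)  # roughly list[str | bytes], built from the
    #                                    # contracted star position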
def get_sequence_type(self, t: Type, context: Context) -> Type | None:
t = get_proper_type(t)
if isinstance(t, AnyType):
return AnyType(TypeOfAny.from_another_any, t)
if isinstance(t, UnionType):
items = [self.get_sequence_type(item, context) for item in t.items]
not_none_items = [item for item in items if item is not None]
if not_none_items:
return make_simplified_union(not_none_items)
else:
return None
if self.chk.type_is_iterable(t) and isinstance(t, (Instance, TupleType)):
if isinstance(t, TupleType):
t = tuple_fallback(t)
return self.chk.iterable_item_type(t, context)
else:
return None
def contract_starred_pattern_types(
self, types: list[Type], star_pos: int | None, num_patterns: int
) -> list[Type]:
"""
Contracts a list of types in a sequence pattern depending on the position of a starred
capture pattern.
For example if the sequence pattern [a, *b, c] is matched against types [bool, int, str,
bytes] the contracted types are [bool, Union[int, str], bytes].
        If star_pos is None, the types are returned unchanged.
"""
unpack_index = find_unpack_in_list(types)
if unpack_index is not None:
# Variadic tuples require "re-shaping" to match the requested pattern.
unpack = types[unpack_index]
assert isinstance(unpack, UnpackType)
unpacked = get_proper_type(unpack.type)
# This should be guaranteed by the normalization in the caller.
assert isinstance(unpacked, Instance) and unpacked.type.fullname == "builtins.tuple"
if star_pos is None:
missing = num_patterns - len(types) + 1
new_types = types[:unpack_index]
new_types += [unpacked.args[0]] * missing
new_types += types[unpack_index + 1 :]
return new_types
prefix, middle, suffix = split_with_prefix_and_suffix(
tuple([UnpackType(unpacked) if isinstance(t, UnpackType) else t for t in types]),
star_pos,
num_patterns - star_pos,
)
new_middle = []
for m in middle:
# The existing code expects the star item type, rather than the type of
# the whole tuple "slice".
if isinstance(m, UnpackType):
new_middle.append(unpacked.args[0])
else:
new_middle.append(m)
return list(prefix) + [make_simplified_union(new_middle)] + list(suffix)
else:
if star_pos is None:
return types
new_types = types[:star_pos]
star_length = len(types) - num_patterns
new_types.append(make_simplified_union(types[star_pos : star_pos + star_length]))
new_types += types[star_pos + star_length :]
return new_types
def expand_starred_pattern_types(
self, types: list[Type], star_pos: int | None, num_types: int, original_unpack: bool
) -> list[Type]:
"""Undoes the contraction done by contract_starred_pattern_types.
For example if the sequence pattern is [a, *b, c] and types [bool, int, str] are extended
to length 4 the result is [bool, int, int, str].
"""
if star_pos is None:
return types
if original_unpack:
# In the case where original tuple type has an unpack item, it is not practical
# to coerce pattern type back to the original shape (and may not even be possible),
# so we only restore the type of the star item.
res = []
for i, t in enumerate(types):
if i != star_pos:
res.append(t)
else:
res.append(UnpackType(self.chk.named_generic_type("builtins.tuple", [t])))
return res
new_types = types[:star_pos]
star_length = num_types - len(types) + 1
new_types += [types[star_pos]] * star_length
new_types += types[star_pos + 1 :]
return new_types
def visit_starred_pattern(self, o: StarredPattern) -> PatternType:
captures: dict[Expression, Type] = {}
if o.capture is not None:
list_type = self.chk.named_generic_type("builtins.list", [self.type_context[-1]])
captures[o.capture] = list_type
return PatternType(self.type_context[-1], UninhabitedType(), captures)
def visit_mapping_pattern(self, o: MappingPattern) -> PatternType:
current_type = get_proper_type(self.type_context[-1])
can_match = True
captures: dict[Expression, Type] = {}
for key, value in zip(o.keys, o.values):
inner_type = self.get_mapping_item_type(o, current_type, key)
if inner_type is None:
can_match = False
inner_type = self.chk.named_type("builtins.object")
pattern_type = self.accept(value, inner_type)
if is_uninhabited(pattern_type.type):
can_match = False
else:
self.update_type_map(captures, pattern_type.captures)
if o.rest is not None:
mapping = self.chk.named_type("typing.Mapping")
if is_subtype(current_type, mapping) and isinstance(current_type, Instance):
mapping_inst = map_instance_to_supertype(current_type, mapping.type)
dict_typeinfo = self.chk.lookup_typeinfo("builtins.dict")
rest_type = Instance(dict_typeinfo, mapping_inst.args)
else:
object_type = self.chk.named_type("builtins.object")
rest_type = self.chk.named_generic_type(
"builtins.dict", [object_type, object_type]
)
captures[o.rest] = rest_type
if can_match:
# We can't narrow the type here, as Mapping key is invariant.
new_type = self.type_context[-1]
else:
new_type = UninhabitedType()
return PatternType(new_type, current_type, captures)
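    # Illustrative sketch (hypothetical user code, not part of mypy):
    #
    #     def f(x: dict[str, int]) -> None:
    #         match x:
    #             case {"key": v, **rest}:
    #                 reveal_type(v)     # int
    #                 reveal_type(rest)  # dict[str, int], rebuilt from the Mapping args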
def get_mapping_item_type(
self, pattern: MappingPattern, mapping_type: Type, key: Expression
) -> Type | None:
mapping_type = get_proper_type(mapping_type)
if isinstance(mapping_type, TypedDictType):
with self.msg.filter_errors() as local_errors:
result: Type | None = self.chk.expr_checker.visit_typeddict_index_expr(
mapping_type, key
)[0]
has_local_errors = local_errors.has_new_errors()
        # If we can't determine the type statically, fall back to treating it as a normal
# mapping
if has_local_errors:
with self.msg.filter_errors() as local_errors:
result = self.get_simple_mapping_item_type(pattern, mapping_type, key)
if local_errors.has_new_errors():
result = None
else:
with self.msg.filter_errors():
result = self.get_simple_mapping_item_type(pattern, mapping_type, key)
return result
def get_simple_mapping_item_type(
self, pattern: MappingPattern, mapping_type: Type, key: Expression
) -> Type:
result, _ = self.chk.expr_checker.check_method_call_by_name(
"__getitem__", mapping_type, [key], [ARG_POS], pattern
)
return result
def visit_class_pattern(self, o: ClassPattern) -> PatternType:
current_type = get_proper_type(self.type_context[-1])
#
# Check class type
#
type_info = o.class_ref.node
if type_info is None:
return PatternType(AnyType(TypeOfAny.from_error), AnyType(TypeOfAny.from_error), {})
if isinstance(type_info, TypeAlias) and not type_info.no_args:
self.msg.fail(message_registry.CLASS_PATTERN_GENERIC_TYPE_ALIAS, o)
return self.early_non_match()
if isinstance(type_info, TypeInfo):
any_type = AnyType(TypeOfAny.implementation_artifact)
args: list[Type] = []
for tv in type_info.defn.type_vars:
if isinstance(tv, TypeVarTupleType):
args.append(
UnpackType(self.chk.named_generic_type("builtins.tuple", [any_type]))
)
else:
args.append(any_type)
typ: Type = Instance(type_info, args)
elif isinstance(type_info, TypeAlias):
typ = type_info.target
elif (
isinstance(type_info, Var)
and type_info.type is not None
and isinstance(get_proper_type(type_info.type), AnyType)
):
typ = type_info.type
else:
if isinstance(type_info, Var) and type_info.type is not None:
name = type_info.type.str_with_options(self.options)
else:
name = type_info.name
self.msg.fail(message_registry.CLASS_PATTERN_TYPE_REQUIRED.format(name), o)
return self.early_non_match()
new_type, rest_type = self.chk.conditional_types_with_intersection(
current_type, [get_type_range(typ)], o, default=current_type
)
if is_uninhabited(new_type):
return self.early_non_match()
# TODO: Do I need this?
narrowed_type = narrow_declared_type(current_type, new_type)
#
# Convert positional to keyword patterns
#
keyword_pairs: list[tuple[str | None, Pattern]] = []
match_arg_set: set[str] = set()
captures: dict[Expression, Type] = {}
if len(o.positionals) != 0:
if self.should_self_match(typ):
if len(o.positionals) > 1:
self.msg.fail(message_registry.CLASS_PATTERN_TOO_MANY_POSITIONAL_ARGS, o)
pattern_type = self.accept(o.positionals[0], narrowed_type)
if not is_uninhabited(pattern_type.type):
return PatternType(
pattern_type.type,
join_types(rest_type, pattern_type.rest_type),
pattern_type.captures,
)
captures = pattern_type.captures
else:
with self.msg.filter_errors() as local_errors:
match_args_type = analyze_member_access(
"__match_args__",
typ,
o,
is_lvalue=False,
is_super=False,
is_operator=False,
msg=self.msg,
original_type=typ,
chk=self.chk,
)
has_local_errors = local_errors.has_new_errors()
if has_local_errors:
self.msg.fail(
message_registry.MISSING_MATCH_ARGS.format(
typ.str_with_options(self.options)
),
o,
)
return self.early_non_match()
proper_match_args_type = get_proper_type(match_args_type)
if isinstance(proper_match_args_type, TupleType):
match_arg_names = get_match_arg_names(proper_match_args_type)
if len(o.positionals) > len(match_arg_names):
self.msg.fail(message_registry.CLASS_PATTERN_TOO_MANY_POSITIONAL_ARGS, o)
return self.early_non_match()
else:
match_arg_names = [None] * len(o.positionals)
for arg_name, pos in zip(match_arg_names, o.positionals):
keyword_pairs.append((arg_name, pos))
if arg_name is not None:
match_arg_set.add(arg_name)
#
# Check for duplicate patterns
#
keyword_arg_set = set()
has_duplicates = False
for key, value in zip(o.keyword_keys, o.keyword_values):
keyword_pairs.append((key, value))
if key in match_arg_set:
self.msg.fail(
message_registry.CLASS_PATTERN_KEYWORD_MATCHES_POSITIONAL.format(key), value
)
has_duplicates = True
elif key in keyword_arg_set:
self.msg.fail(
message_registry.CLASS_PATTERN_DUPLICATE_KEYWORD_PATTERN.format(key), value
)
has_duplicates = True
keyword_arg_set.add(key)
if has_duplicates:
return self.early_non_match()
#
# Check keyword patterns
#
can_match = True
for keyword, pattern in keyword_pairs:
key_type: Type | None = None
with self.msg.filter_errors() as local_errors:
if keyword is not None:
key_type = analyze_member_access(
keyword,
narrowed_type,
pattern,
is_lvalue=False,
is_super=False,
is_operator=False,
msg=self.msg,
original_type=new_type,
chk=self.chk,
)
else:
key_type = AnyType(TypeOfAny.from_error)
has_local_errors = local_errors.has_new_errors()
if has_local_errors or key_type is None:
key_type = AnyType(TypeOfAny.from_error)
self.msg.fail(
message_registry.CLASS_PATTERN_UNKNOWN_KEYWORD.format(
typ.str_with_options(self.options), keyword
),
pattern,
)
inner_type, inner_rest_type, inner_captures = self.accept(pattern, key_type)
if is_uninhabited(inner_type):
can_match = False
else:
self.update_type_map(captures, inner_captures)
if not is_uninhabited(inner_rest_type):
rest_type = current_type
if not can_match:
new_type = UninhabitedType()
return PatternType(new_type, rest_type, captures)
def should_self_match(self, typ: Type) -> bool:
typ = get_proper_type(typ)
if isinstance(typ, Instance) and typ.type.is_named_tuple:
return False
for other in self.self_match_types:
if is_subtype(typ, other):
return True
return False
def can_match_sequence(self, typ: ProperType) -> bool:
if isinstance(typ, UnionType):
return any(self.can_match_sequence(get_proper_type(item)) for item in typ.items)
for other in self.non_sequence_match_types:
# We have to ignore promotions, as memoryview should match, but bytes,
# which it can be promoted to, shouldn't
if is_subtype(typ, other, ignore_promotions=True):
return False
sequence = self.chk.named_type("typing.Sequence")
# If the static type is more general than sequence the actual type could still match
return is_subtype(typ, sequence) or is_subtype(sequence, typ)
def generate_types_from_names(self, type_names: list[str]) -> list[Type]:
types: list[Type] = []
for name in type_names:
try:
types.append(self.chk.named_type(name))
except KeyError as e:
# Some built in types are not defined in all test cases
if not name.startswith("builtins."):
raise e
return types
def update_type_map(
self, original_type_map: dict[Expression, Type], extra_type_map: dict[Expression, Type]
) -> None:
# Calculating this would not be needed if TypeMap directly used literal hashes instead of
        # expressions, as suggested in the TODO above its definition
already_captured = {literal_hash(expr) for expr in original_type_map}
for expr, typ in extra_type_map.items():
if literal_hash(expr) in already_captured:
node = get_var(expr)
self.msg.fail(
message_registry.MULTIPLE_ASSIGNMENTS_IN_PATTERN.format(node.name), expr
)
else:
original_type_map[expr] = typ
def construct_sequence_child(self, outer_type: Type, inner_type: Type) -> Type:
"""
        If outer_type is a child class of typing.Sequence, returns a new instance of
        outer_type that is a Sequence of inner_type. If outer_type is not a child class of
        typing.Sequence, just returns a Sequence of inner_type.
For example:
construct_sequence_child(List[int], str) = List[str]
TODO: this doesn't make sense. For example if one has class S(Sequence[int], Generic[T])
or class T(Sequence[Tuple[T, T]]), there is no way any of those can map to Sequence[str].
"""
proper_type = get_proper_type(outer_type)
if isinstance(proper_type, UnionType):
types = [
self.construct_sequence_child(item, inner_type)
for item in proper_type.items
if self.can_match_sequence(get_proper_type(item))
]
return make_simplified_union(types)
sequence = self.chk.named_generic_type("typing.Sequence", [inner_type])
if is_subtype(outer_type, self.chk.named_type("typing.Sequence")):
proper_type = get_proper_type(outer_type)
if isinstance(proper_type, TupleType):
proper_type = tuple_fallback(proper_type)
assert isinstance(proper_type, Instance)
empty_type = fill_typevars(proper_type.type)
partial_type = expand_type_by_instance(empty_type, sequence)
return expand_type_by_instance(partial_type, proper_type)
else:
return sequence
def early_non_match(self) -> PatternType:
return PatternType(UninhabitedType(), self.type_context[-1], {})
def get_match_arg_names(typ: TupleType) -> list[str | None]:
args: list[str | None] = []
for item in typ.items:
values = try_getting_str_literals_from_type(item)
if values is None or len(values) != 1:
args.append(None)
else:
args.append(values[0])
return args
def get_var(expr: Expression) -> Var:
"""
    Warning: this is only valid for expressions captured by a match statement.
    Don't call it from anywhere else.
"""
assert isinstance(expr, NameExpr)
node = expr.node
assert isinstance(node, Var)
return node
def get_type_range(typ: Type) -> mypy.checker.TypeRange:
typ = get_proper_type(typ)
if (
isinstance(typ, Instance)
and typ.last_known_value
and isinstance(typ.last_known_value.value, bool)
):
typ = typ.last_known_value
return mypy.checker.TypeRange(typ, is_upper_bound=False)
def is_uninhabited(typ: Type) -> bool:
return isinstance(get_proper_type(typ), UninhabitedType)
algorandfoundation/puya | src/puyapy/_vendor/mypy/checkpattern.py | Python | NOASSERTION | 33,519
"""
Format expression type checker.
This file is conceptually part of ExpressionChecker and TypeChecker. Main functionality
is located in StringFormatterChecker.check_str_format_call() for '{}'.format(), and in
StringFormatterChecker.check_str_interpolation() for printf-style % interpolation.
Note that although at runtime format strings are parsed using custom parsers,
here we use a regexp-based approach. This way we 99% match runtime behaviour while keeping
implementation simple.
"""
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Callable, Dict, Final, Match, Pattern, Tuple, Union, cast
from typing_extensions import TypeAlias as _TypeAlias
import mypy.errorcodes as codes
from mypy.errors import Errors
from mypy.nodes import (
ARG_NAMED,
ARG_POS,
ARG_STAR,
ARG_STAR2,
BytesExpr,
CallExpr,
Context,
DictExpr,
Expression,
ExpressionStmt,
IndexExpr,
IntExpr,
MemberExpr,
MypyFile,
NameExpr,
Node,
StarExpr,
StrExpr,
TempNode,
TupleExpr,
)
from mypy.types import (
AnyType,
Instance,
LiteralType,
TupleType,
Type,
TypeOfAny,
TypeVarTupleType,
TypeVarType,
UnionType,
UnpackType,
find_unpack_in_list,
get_proper_type,
get_proper_types,
)
if TYPE_CHECKING:
# break import cycle only needed for mypy
import mypy.checker
import mypy.checkexpr
from mypy import message_registry
from mypy.maptype import map_instance_to_supertype
from mypy.messages import MessageBuilder
from mypy.parse import parse
from mypy.subtypes import is_subtype
from mypy.typeops import custom_special_method
FormatStringExpr: _TypeAlias = Union[StrExpr, BytesExpr]
Checkers: _TypeAlias = Tuple[Callable[[Expression], None], Callable[[Type], bool]]
MatchMap: _TypeAlias = Dict[Tuple[int, int], Match[str]] # span -> match
def compile_format_re() -> Pattern[str]:
"""Construct regexp to match format conversion specifiers in % interpolation.
See https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting
The regexp is intentionally a bit wider to report better errors.
"""
key_re = r"(\((?P<key>[^)]*)\))?" # (optional) parenthesised sequence of characters.
flags_re = r"(?P<flags>[#0\-+ ]*)" # (optional) sequence of flags.
width_re = r"(?P<width>[1-9][0-9]*|\*)?" # (optional) minimum field width (* or numbers).
    precision_re = r"(?:\.(?P<precision>\*|[0-9]+)?)?" # (optional) . followed by * or numbers.
length_mod_re = r"[hlL]?" # (optional) length modifier (unused).
type_re = r"(?P<type>.)?" # conversion type.
format_re = "%" + key_re + flags_re + width_re + precision_re + length_mod_re + type_re
return re.compile(format_re)
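# Illustrative sketch (hypothetical input, not part of mypy): for the %-style
# specifier "%(name)-8.3f", FORMAT_RE captures key='name', flags='-', width='8',
# precision='3' and type='f'.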
def compile_new_format_re(custom_spec: bool) -> Pattern[str]:
"""Construct regexps to match format conversion specifiers in str.format() calls.
    See https://docs.python.org/3/library/string.html#formatspec for
specifications. The regexps are intentionally wider, to report better errors,
instead of just not matching.
"""
# Field (optional) is an integer/identifier possibly followed by several .attr and [index].
field = r"(?P<field>(?P<key>[^.[!:]*)([^:!]+)?)"
    # Conversion (optional) is ! followed by one of the letters for forced repr(), str(), or ascii().
conversion = r"(?P<conversion>![^:])?"
# Format specification (optional) follows its own mini-language:
if not custom_spec:
# Fill and align is valid for all builtin types.
fill_align = r"(?P<fill_align>.?[<>=^])?"
# Number formatting options are only valid for int, float, complex, and Decimal,
# except if only width is given (it is valid for all types).
# This contains sign, flags (sign, # and/or 0), width, grouping (_ or ,) and precision.
num_spec = r"(?P<flags>[+\- ]?#?0?)(?P<width>\d+)?[_,]?(?P<precision>\.\d+)?"
# The last element is type.
conv_type = r"(?P<type>.)?" # only some are supported, but we want to give a better error
format_spec = r"(?P<format_spec>:" + fill_align + num_spec + conv_type + r")?"
else:
        # Custom types can define their own format_spec using __format__().
format_spec = r"(?P<format_spec>:.*)?"
return re.compile(field + conversion + format_spec)
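# Illustrative sketch (hypothetical input, not part of mypy): for the str.format()
# replacement field "0!r:>10.2f" (braces already stripped by the caller),
# FORMAT_RE_NEW captures key='0', conversion='!r', fill_align='>', width='10',
# precision='.2' and type='f'.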
FORMAT_RE: Final = compile_format_re()
FORMAT_RE_NEW: Final = compile_new_format_re(False)
FORMAT_RE_NEW_CUSTOM: Final = compile_new_format_re(True)
DUMMY_FIELD_NAME: Final = "__dummy_name__"
# Types that require either int or float.
NUMERIC_TYPES_OLD: Final = {"d", "i", "o", "u", "x", "X", "e", "E", "f", "F", "g", "G"}
NUMERIC_TYPES_NEW: Final = {"b", "d", "o", "e", "E", "f", "F", "g", "G", "n", "x", "X", "%"}
# These types accept _only_ int.
REQUIRE_INT_OLD: Final = {"o", "x", "X"}
REQUIRE_INT_NEW: Final = {"b", "d", "o", "x", "X"}
# These types fall back to SupportsFloat with % (other fall back to SupportsInt)
FLOAT_TYPES: Final = {"e", "E", "f", "F", "g", "G"}
class ConversionSpecifier:
def __init__(
self, match: Match[str], start_pos: int = -1, non_standard_format_spec: bool = False
) -> None:
self.whole_seq = match.group()
self.start_pos = start_pos
m_dict = match.groupdict()
self.key = m_dict.get("key")
# Replace unmatched optional groups with empty matches (for convenience).
self.conv_type = m_dict.get("type", "")
self.flags = m_dict.get("flags", "")
self.width = m_dict.get("width", "")
self.precision = m_dict.get("precision", "")
# Used only for str.format() calls (it may be custom for types with __format__()).
self.format_spec = m_dict.get("format_spec")
self.non_standard_format_spec = non_standard_format_spec
# Used only for str.format() calls.
self.conversion = m_dict.get("conversion")
# Full formatted expression (i.e. key plus following attributes and/or indexes).
# Used only for str.format() calls.
self.field = m_dict.get("field")
def has_key(self) -> bool:
return self.key is not None
def has_star(self) -> bool:
return self.width == "*" or self.precision == "*"
def parse_conversion_specifiers(format_str: str) -> list[ConversionSpecifier]:
"""Parse c-printf-style format string into list of conversion specifiers."""
specifiers: list[ConversionSpecifier] = []
for m in re.finditer(FORMAT_RE, format_str):
specifiers.append(ConversionSpecifier(m, start_pos=m.start()))
return specifiers
def parse_format_value(
format_value: str, ctx: Context, msg: MessageBuilder, nested: bool = False
) -> list[ConversionSpecifier] | None:
"""Parse format string into list of conversion specifiers.
The specifiers may be nested (two levels maximum), in this case they are ordered as
'{0:{1}}, {2:{3}{4}}'. Return None in case of an error.
"""
top_targets = find_non_escaped_targets(format_value, ctx, msg)
if top_targets is None:
return None
result: list[ConversionSpecifier] = []
for target, start_pos in top_targets:
match = FORMAT_RE_NEW.fullmatch(target)
if match:
conv_spec = ConversionSpecifier(match, start_pos=start_pos)
else:
custom_match = FORMAT_RE_NEW_CUSTOM.fullmatch(target)
if custom_match:
conv_spec = ConversionSpecifier(
custom_match, start_pos=start_pos, non_standard_format_spec=True
)
else:
msg.fail(
"Invalid conversion specifier in format string",
ctx,
code=codes.STRING_FORMATTING,
)
return None
if conv_spec.key and ("{" in conv_spec.key or "}" in conv_spec.key):
msg.fail("Conversion value must not contain { or }", ctx, code=codes.STRING_FORMATTING)
return None
result.append(conv_spec)
# Parse nested conversions that are allowed in format specifier.
if (
conv_spec.format_spec
and conv_spec.non_standard_format_spec
and ("{" in conv_spec.format_spec or "}" in conv_spec.format_spec)
):
if nested:
msg.fail(
"Formatting nesting must be at most two levels deep",
ctx,
code=codes.STRING_FORMATTING,
)
return None
sub_conv_specs = parse_format_value(conv_spec.format_spec, ctx, msg, nested=True)
if sub_conv_specs is None:
return None
result.extend(sub_conv_specs)
return result
def find_non_escaped_targets(
format_value: str, ctx: Context, msg: MessageBuilder
) -> list[tuple[str, int]] | None:
"""Return list of raw (un-parsed) format specifiers in format string.
    Format specifiers don't include the enclosing braces. We don't use a regexp for
    this because regexps don't work well with nested/repeated patterns
    (both greedy and non-greedy), and such patterns are heavily used internally for
    the representation of f-strings.
Return None in case of an error.
"""
result = []
next_spec = ""
pos = 0
nesting = 0
while pos < len(format_value):
c = format_value[pos]
if not nesting:
# Skip any paired '{{' and '}}', enter nesting on '{', report error on '}'.
if c == "{":
if pos < len(format_value) - 1 and format_value[pos + 1] == "{":
pos += 1
else:
nesting = 1
if c == "}":
if pos < len(format_value) - 1 and format_value[pos + 1] == "}":
pos += 1
else:
msg.fail(
"Invalid conversion specifier in format string: unexpected }",
ctx,
code=codes.STRING_FORMATTING,
)
return None
else:
# Adjust nesting level, then either continue adding chars or move on.
if c == "{":
nesting += 1
if c == "}":
nesting -= 1
if nesting:
next_spec += c
else:
result.append((next_spec, pos - len(next_spec)))
next_spec = ""
pos += 1
if nesting:
msg.fail(
"Invalid conversion specifier in format string: unmatched {",
ctx,
code=codes.STRING_FORMATTING,
)
return None
return result
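# Illustrative sketch (not part of the vendored mypy source): for "{0:{1}} {{literal}}",
# find_non_escaped_targets should return [("0:{1}", 1)] -- the nested target is kept
# verbatim, while the doubled braces around "literal" are escapes and are skipped.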
class StringFormatterChecker:
"""String interpolation/formatter type checker.
This class works closely together with checker.ExpressionChecker.
"""
# Some services are provided by a TypeChecker instance.
chk: mypy.checker.TypeChecker
# This is shared with TypeChecker, but stored also here for convenience.
msg: MessageBuilder
# Some services are provided by a ExpressionChecker instance.
exprchk: mypy.checkexpr.ExpressionChecker
def __init__(
self,
exprchk: mypy.checkexpr.ExpressionChecker,
chk: mypy.checker.TypeChecker,
msg: MessageBuilder,
) -> None:
"""Construct an expression type checker."""
self.chk = chk
self.exprchk = exprchk
self.msg = msg
def check_str_format_call(self, call: CallExpr, format_value: str) -> None:
"""Perform more precise checks for str.format() calls when possible.
Currently the checks are performed for:
* Actual string literals
* Literal types with string values
* Final names with string values
The checks that we currently perform:
* Check generic validity (e.g. unmatched { or }, and {} in invalid positions)
* Check consistency of specifiers' auto-numbering
* Verify that replacements can be found for all conversion specifiers,
and all arguments were used
* Non-standard format specs are only allowed for types with custom __format__
* Type check replacements with accessors applied (if any).
* Verify that specifier type is known and matches replacement type
* Perform special checks for some specifier types:
- 'c' requires a single character string
- 's' must not accept bytes
- non-empty flags are only allowed for numeric types
"""
conv_specs = parse_format_value(format_value, call, self.msg)
if conv_specs is None:
return
if not self.auto_generate_keys(conv_specs, call):
return
self.check_specs_in_format_call(call, conv_specs, format_value)
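    # Illustrative sketch (not part of the vendored mypy source): given the checks listed
    # above, '"{:d}".format("text")' is flagged because "d" requires an integer replacement,
    # and '"{0} {1}".format(x)' is flagged because no replacement exists for index 1.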
def check_specs_in_format_call(
self, call: CallExpr, specs: list[ConversionSpecifier], format_value: str
) -> None:
"""Perform pairwise checks for conversion specifiers vs their replacements.
The core logic for format checking is implemented in this method.
"""
assert all(s.key for s in specs), "Keys must be auto-generated first!"
replacements = self.find_replacements_in_call(call, [cast(str, s.key) for s in specs])
assert len(replacements) == len(specs)
for spec, repl in zip(specs, replacements):
repl = self.apply_field_accessors(spec, repl, ctx=call)
actual_type = repl.type if isinstance(repl, TempNode) else self.chk.lookup_type(repl)
assert actual_type is not None
# Special case custom formatting.
if (
spec.format_spec
and spec.non_standard_format_spec
and
# Exclude "dynamic" specifiers (i.e. containing nested formatting).
not ("{" in spec.format_spec or "}" in spec.format_spec)
):
if (
not custom_special_method(actual_type, "__format__", check_all=True)
or spec.conversion
):
# TODO: add support for some custom specs like datetime?
self.msg.fail(
f'Unrecognized format specification "{spec.format_spec[1:]}"',
call,
code=codes.STRING_FORMATTING,
)
continue
# Adjust expected and actual types.
if not spec.conv_type:
expected_type: Type | None = AnyType(TypeOfAny.special_form)
else:
assert isinstance(call.callee, MemberExpr)
if isinstance(call.callee.expr, StrExpr):
format_str = call.callee.expr
else:
format_str = StrExpr(format_value)
expected_type = self.conversion_type(
spec.conv_type, call, format_str, format_call=True
)
if spec.conversion is not None:
                # If an explicit conversion is given, it is applied _first_.
if spec.conversion[1] not in "rsa":
self.msg.fail(
'Invalid conversion type "{}",'
' must be one of "r", "s" or "a"'.format(spec.conversion[1]),
call,
code=codes.STRING_FORMATTING,
)
actual_type = self.named_type("builtins.str")
# Perform the checks for given types.
if expected_type is None:
continue
a_type = get_proper_type(actual_type)
actual_items = (
get_proper_types(a_type.items) if isinstance(a_type, UnionType) else [a_type]
)
for a_type in actual_items:
if custom_special_method(a_type, "__format__"):
continue
self.check_placeholder_type(a_type, expected_type, call)
self.perform_special_format_checks(spec, call, repl, a_type, expected_type)
def perform_special_format_checks(
self,
spec: ConversionSpecifier,
call: CallExpr,
repl: Expression,
actual_type: Type,
expected_type: Type,
) -> None:
# TODO: try refactoring to combine this logic with % formatting.
if spec.conv_type == "c":
if isinstance(repl, (StrExpr, BytesExpr)) and len(repl.value) != 1:
self.msg.requires_int_or_char(call, format_call=True)
c_typ = get_proper_type(self.chk.lookup_type(repl))
if isinstance(c_typ, Instance) and c_typ.last_known_value:
c_typ = c_typ.last_known_value
if isinstance(c_typ, LiteralType) and isinstance(c_typ.value, str):
if len(c_typ.value) != 1:
self.msg.requires_int_or_char(call, format_call=True)
if (not spec.conv_type or spec.conv_type == "s") and not spec.conversion:
if has_type_component(actual_type, "builtins.bytes") and not custom_special_method(
actual_type, "__str__"
):
self.msg.fail(
'If x = b\'abc\' then f"{x}" or "{}".format(x) produces "b\'abc\'", '
'not "abc". If this is desired behavior, use f"{x!r}" or "{!r}".format(x). '
"Otherwise, decode the bytes",
call,
code=codes.STR_BYTES_PY3,
)
if spec.flags:
numeric_types = UnionType(
[self.named_type("builtins.int"), self.named_type("builtins.float")]
)
if (
spec.conv_type
and spec.conv_type not in NUMERIC_TYPES_NEW
or not spec.conv_type
and not is_subtype(actual_type, numeric_types)
and not custom_special_method(actual_type, "__format__")
):
self.msg.fail(
"Numeric flags are only allowed for numeric types",
call,
code=codes.STRING_FORMATTING,
)
def find_replacements_in_call(self, call: CallExpr, keys: list[str]) -> list[Expression]:
"""Find replacement expression for every specifier in str.format() call.
In case of an error use TempNode(AnyType).
"""
result: list[Expression] = []
used: set[Expression] = set()
for key in keys:
if key.isdecimal():
expr = self.get_expr_by_position(int(key), call)
if not expr:
self.msg.fail(
"Cannot find replacement for positional"
" format specifier {}".format(key),
call,
code=codes.STRING_FORMATTING,
)
expr = TempNode(AnyType(TypeOfAny.from_error))
else:
expr = self.get_expr_by_name(key, call)
if not expr:
self.msg.fail(
f'Cannot find replacement for named format specifier "{key}"',
call,
code=codes.STRING_FORMATTING,
)
expr = TempNode(AnyType(TypeOfAny.from_error))
result.append(expr)
if not isinstance(expr, TempNode):
used.add(expr)
# Strictly speaking not using all replacements is not a type error, but most likely
# a typo in user code, so we show an error like we do for % formatting.
total_explicit = len([kind for kind in call.arg_kinds if kind in (ARG_POS, ARG_NAMED)])
if len(used) < total_explicit:
self.msg.too_many_string_formatting_arguments(call)
return result
def get_expr_by_position(self, pos: int, call: CallExpr) -> Expression | None:
"""Get positional replacement expression from '{0}, {1}'.format(x, y, ...) call.
If the type is from *args, return TempNode(<item type>). Return None in case of
an error.
"""
pos_args = [arg for arg, kind in zip(call.args, call.arg_kinds) if kind == ARG_POS]
if pos < len(pos_args):
return pos_args[pos]
star_args = [arg for arg, kind in zip(call.args, call.arg_kinds) if kind == ARG_STAR]
if not star_args:
return None
# Fall back to *args when present in call.
star_arg = star_args[0]
varargs_type = get_proper_type(self.chk.lookup_type(star_arg))
if not isinstance(varargs_type, Instance) or not varargs_type.type.has_base(
"typing.Sequence"
):
# Error should be already reported.
return TempNode(AnyType(TypeOfAny.special_form))
iter_info = self.chk.named_generic_type(
"typing.Sequence", [AnyType(TypeOfAny.special_form)]
).type
return TempNode(map_instance_to_supertype(varargs_type, iter_info).args[0])
def get_expr_by_name(self, key: str, call: CallExpr) -> Expression | None:
"""Get named replacement expression from '{name}'.format(name=...) call.
If the type is from **kwargs, return TempNode(<item type>). Return None in case of
an error.
"""
named_args = [
arg
for arg, kind, name in zip(call.args, call.arg_kinds, call.arg_names)
if kind == ARG_NAMED and name == key
]
if named_args:
return named_args[0]
star_args_2 = [arg for arg, kind in zip(call.args, call.arg_kinds) if kind == ARG_STAR2]
if not star_args_2:
return None
star_arg_2 = star_args_2[0]
kwargs_type = get_proper_type(self.chk.lookup_type(star_arg_2))
if not isinstance(kwargs_type, Instance) or not kwargs_type.type.has_base(
"typing.Mapping"
):
# Error should be already reported.
return TempNode(AnyType(TypeOfAny.special_form))
any_type = AnyType(TypeOfAny.special_form)
mapping_info = self.chk.named_generic_type("typing.Mapping", [any_type, any_type]).type
return TempNode(map_instance_to_supertype(kwargs_type, mapping_info).args[1])
def auto_generate_keys(self, all_specs: list[ConversionSpecifier], ctx: Context) -> bool:
"""Translate '{} {name} {}' to '{0} {name} {1}'.
        Return True if generation was successful; otherwise report an error and return False.
"""
some_defined = any(s.key and s.key.isdecimal() for s in all_specs)
all_defined = all(bool(s.key) for s in all_specs)
if some_defined and not all_defined:
self.msg.fail(
"Cannot combine automatic field numbering and manual field specification",
ctx,
code=codes.STRING_FORMATTING,
)
return False
if all_defined:
return True
next_index = 0
for spec in all_specs:
if not spec.key:
str_index = str(next_index)
spec.key = str_index
# Update also the full field (i.e. turn {.x} into {0.x}).
if not spec.field:
spec.field = str_index
else:
spec.field = str_index + spec.field
next_index += 1
return True
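    # Illustrative sketch (not part of the vendored mypy source): for the specifiers parsed
    # from "{} {name} {}", auto_generate_keys fills in the missing keys so they become
    # "0", "name" and "1", mirroring str.format() auto-numbering; mixing "{0}" and "{}"
    # in one format string is rejected with an error instead.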
def apply_field_accessors(
self, spec: ConversionSpecifier, repl: Expression, ctx: Context
) -> Expression:
"""Transform and validate expr in '{.attr[item]}'.format(expr) into expr.attr['item'].
If validation fails, return TempNode(AnyType).
"""
assert spec.key, "Keys must be auto-generated first!"
if spec.field == spec.key:
return repl
assert spec.field
temp_errors = Errors(self.chk.options)
dummy = DUMMY_FIELD_NAME + spec.field[len(spec.key) :]
temp_ast: Node = parse(
dummy, fnam="<format>", module=None, options=self.chk.options, errors=temp_errors
)
if temp_errors.is_errors():
self.msg.fail(
f'Syntax error in format specifier "{spec.field}"',
ctx,
code=codes.STRING_FORMATTING,
)
return TempNode(AnyType(TypeOfAny.from_error))
# These asserts are guaranteed by the original regexp.
assert isinstance(temp_ast, MypyFile)
temp_ast = temp_ast.defs[0]
assert isinstance(temp_ast, ExpressionStmt)
temp_ast = temp_ast.expr
if not self.validate_and_transform_accessors(temp_ast, repl, spec, ctx=ctx):
return TempNode(AnyType(TypeOfAny.from_error))
# Check if there are any other errors (like missing members).
# TODO: fix column to point to actual start of the format specifier _within_ string.
temp_ast.line = ctx.line
temp_ast.column = ctx.column
self.exprchk.accept(temp_ast)
return temp_ast
def validate_and_transform_accessors(
self,
temp_ast: Expression,
original_repl: Expression,
spec: ConversionSpecifier,
ctx: Context,
) -> bool:
"""Validate and transform (in-place) format field accessors.
        On error, report it and return False. The transformations include replacing the dummy
        variable with the actual replacement expression and translating any name expressions
        in an index into strings, so that this will work:
class User(TypedDict):
name: str
id: int
u: User
'{[id]:d} -> {[name]}'.format(u)
"""
if not isinstance(temp_ast, (MemberExpr, IndexExpr)):
self.msg.fail(
"Only index and member expressions are allowed in"
' format field accessors; got "{}"'.format(spec.field),
ctx,
code=codes.STRING_FORMATTING,
)
return False
if isinstance(temp_ast, MemberExpr):
node = temp_ast.expr
else:
node = temp_ast.base
if not isinstance(temp_ast.index, (NameExpr, IntExpr)):
assert spec.key, "Call this method only after auto-generating keys!"
assert spec.field
self.msg.fail(
"Invalid index expression in format field"
' accessor "{}"'.format(spec.field[len(spec.key) :]),
ctx,
code=codes.STRING_FORMATTING,
)
return False
if isinstance(temp_ast.index, NameExpr):
temp_ast.index = StrExpr(temp_ast.index.name)
if isinstance(node, NameExpr) and node.name == DUMMY_FIELD_NAME:
# Replace it with the actual replacement expression.
assert isinstance(temp_ast, (IndexExpr, MemberExpr)) # XXX: this is redundant
if isinstance(temp_ast, IndexExpr):
temp_ast.base = original_repl
else:
temp_ast.expr = original_repl
return True
node.line = ctx.line
node.column = ctx.column
return self.validate_and_transform_accessors(
node, original_repl=original_repl, spec=spec, ctx=ctx
)
# TODO: In Python 3, the bytes formatting has a more restricted set of options
# compared to string formatting.
def check_str_interpolation(self, expr: FormatStringExpr, replacements: Expression) -> Type:
"""Check the types of the 'replacements' in a string interpolation
expression: str % replacements.
"""
self.exprchk.accept(expr)
specifiers = parse_conversion_specifiers(expr.value)
has_mapping_keys = self.analyze_conversion_specifiers(specifiers, expr)
if has_mapping_keys is None:
pass # Error was reported
elif has_mapping_keys:
self.check_mapping_str_interpolation(specifiers, replacements, expr)
else:
self.check_simple_str_interpolation(specifiers, replacements, expr)
if isinstance(expr, BytesExpr):
return self.named_type("builtins.bytes")
elif isinstance(expr, StrExpr):
return self.named_type("builtins.str")
else:
assert False
def analyze_conversion_specifiers(
self, specifiers: list[ConversionSpecifier], context: Context
) -> bool | None:
has_star = any(specifier.has_star() for specifier in specifiers)
has_key = any(specifier.has_key() for specifier in specifiers)
all_have_keys = all(
specifier.has_key() or specifier.conv_type == "%" for specifier in specifiers
)
if has_key and has_star:
self.msg.string_interpolation_with_star_and_key(context)
return None
if has_key and not all_have_keys:
self.msg.string_interpolation_mixing_key_and_non_keys(context)
return None
return has_key
def check_simple_str_interpolation(
self,
specifiers: list[ConversionSpecifier],
replacements: Expression,
expr: FormatStringExpr,
) -> None:
"""Check % string interpolation with positional specifiers '%s, %d' % ('yes, 42')."""
checkers = self.build_replacement_checkers(specifiers, replacements, expr)
if checkers is None:
return
rhs_type = get_proper_type(self.accept(replacements))
rep_types: list[Type] = []
if isinstance(rhs_type, TupleType):
rep_types = rhs_type.items
unpack_index = find_unpack_in_list(rep_types)
if unpack_index is not None:
                # TODO: we should probably warn about a potentially short tuple.
                # However, without special-casing for tuple(f(i) for i in other_tuple)
                # this causes a false positive on mypy self-check in report.py.
extras = max(0, len(checkers) - len(rep_types) + 1)
unpacked = rep_types[unpack_index]
assert isinstance(unpacked, UnpackType)
unpacked = get_proper_type(unpacked.type)
if isinstance(unpacked, TypeVarTupleType):
unpacked = get_proper_type(unpacked.upper_bound)
assert (
isinstance(unpacked, Instance) and unpacked.type.fullname == "builtins.tuple"
)
unpack_items = [unpacked.args[0]] * extras
rep_types = rep_types[:unpack_index] + unpack_items + rep_types[unpack_index + 1 :]
elif isinstance(rhs_type, AnyType):
return
elif isinstance(rhs_type, Instance) and rhs_type.type.fullname == "builtins.tuple":
# Assume that an arbitrary-length tuple has the right number of items.
rep_types = [rhs_type.args[0]] * len(checkers)
elif isinstance(rhs_type, UnionType):
for typ in rhs_type.relevant_items():
temp_node = TempNode(typ)
temp_node.line = replacements.line
self.check_simple_str_interpolation(specifiers, temp_node, expr)
return
else:
rep_types = [rhs_type]
if len(checkers) > len(rep_types):
            # Only check the fixed-length Tuple type. Other iterable types are skipped.
if is_subtype(rhs_type, self.chk.named_type("typing.Iterable")) and not isinstance(
rhs_type, TupleType
):
return
else:
self.msg.too_few_string_formatting_arguments(replacements)
elif len(checkers) < len(rep_types):
self.msg.too_many_string_formatting_arguments(replacements)
else:
if len(checkers) == 1:
check_node, check_type = checkers[0]
if isinstance(rhs_type, TupleType) and len(rhs_type.items) == 1:
check_type(rhs_type.items[0])
else:
check_node(replacements)
elif isinstance(replacements, TupleExpr) and not any(
isinstance(item, StarExpr) for item in replacements.items
):
for checks, rep_node in zip(checkers, replacements.items):
check_node, check_type = checks
check_node(rep_node)
else:
for checks, rep_type in zip(checkers, rep_types):
check_node, check_type = checks
check_type(rep_type)
def check_mapping_str_interpolation(
self,
specifiers: list[ConversionSpecifier],
replacements: Expression,
expr: FormatStringExpr,
) -> None:
"""Check % string interpolation with names specifiers '%(name)s' % {'name': 'John'}."""
if isinstance(replacements, DictExpr) and all(
isinstance(k, (StrExpr, BytesExpr)) for k, v in replacements.items
):
mapping: dict[str, Type] = {}
for k, v in replacements.items:
if isinstance(expr, BytesExpr):
# Special case: for bytes formatting keys must be bytes.
if not isinstance(k, BytesExpr):
self.msg.fail(
"Dictionary keys in bytes formatting must be bytes, not strings",
expr,
code=codes.STRING_FORMATTING,
)
key_str = cast(FormatStringExpr, k).value
mapping[key_str] = self.accept(v)
for specifier in specifiers:
if specifier.conv_type == "%":
# %% is allowed in mappings, no checking is required
continue
assert specifier.key is not None
if specifier.key not in mapping:
self.msg.key_not_in_mapping(specifier.key, replacements)
return
rep_type = mapping[specifier.key]
assert specifier.conv_type is not None
expected_type = self.conversion_type(specifier.conv_type, replacements, expr)
if expected_type is None:
return
self.chk.check_subtype(
rep_type,
expected_type,
replacements,
message_registry.INCOMPATIBLE_TYPES_IN_STR_INTERPOLATION,
"expression has type",
f"placeholder with key '{specifier.key}' has type",
code=codes.STRING_FORMATTING,
)
if specifier.conv_type == "s":
self.check_s_special_cases(expr, rep_type, expr)
else:
rep_type = self.accept(replacements)
dict_type = self.build_dict_type(expr)
self.chk.check_subtype(
rep_type,
dict_type,
replacements,
message_registry.FORMAT_REQUIRES_MAPPING,
"expression has type",
"expected type for mapping is",
code=codes.STRING_FORMATTING,
)
def build_dict_type(self, expr: FormatStringExpr) -> Type:
"""Build expected mapping type for right operand in % formatting."""
any_type = AnyType(TypeOfAny.special_form)
if isinstance(expr, BytesExpr):
bytes_type = self.chk.named_generic_type("builtins.bytes", [])
return self.chk.named_generic_type(
"_typeshed.SupportsKeysAndGetItem", [bytes_type, any_type]
)
elif isinstance(expr, StrExpr):
str_type = self.chk.named_generic_type("builtins.str", [])
return self.chk.named_generic_type(
"_typeshed.SupportsKeysAndGetItem", [str_type, any_type]
)
else:
assert False, "Unreachable"
def build_replacement_checkers(
self, specifiers: list[ConversionSpecifier], context: Context, expr: FormatStringExpr
) -> list[Checkers] | None:
checkers: list[Checkers] = []
for specifier in specifiers:
checker = self.replacement_checkers(specifier, context, expr)
if checker is None:
return None
checkers.extend(checker)
return checkers
def replacement_checkers(
self, specifier: ConversionSpecifier, context: Context, expr: FormatStringExpr
) -> list[Checkers] | None:
"""Returns a list of tuples of two functions that check whether a replacement is
of the right type for the specifier. The first function takes a node and checks
its type in the right type context. The second function just checks a type.
"""
checkers: list[Checkers] = []
if specifier.width == "*":
checkers.append(self.checkers_for_star(context))
if specifier.precision == "*":
checkers.append(self.checkers_for_star(context))
if specifier.conv_type == "c":
c = self.checkers_for_c_type(specifier.conv_type, context, expr)
if c is None:
return None
checkers.append(c)
elif specifier.conv_type is not None and specifier.conv_type != "%":
c = self.checkers_for_regular_type(specifier.conv_type, context, expr)
if c is None:
return None
checkers.append(c)
return checkers
def checkers_for_star(self, context: Context) -> Checkers:
"""Returns a tuple of check functions that check whether, respectively,
a node or a type is compatible with a star in a conversion specifier.
"""
expected = self.named_type("builtins.int")
def check_type(type: Type) -> bool:
expected = self.named_type("builtins.int")
return self.chk.check_subtype(
type, expected, context, "* wants int", code=codes.STRING_FORMATTING
)
def check_expr(expr: Expression) -> None:
type = self.accept(expr, expected)
check_type(type)
return check_expr, check_type
def check_placeholder_type(self, typ: Type, expected_type: Type, context: Context) -> bool:
return self.chk.check_subtype(
typ,
expected_type,
context,
message_registry.INCOMPATIBLE_TYPES_IN_STR_INTERPOLATION,
"expression has type",
"placeholder has type",
code=codes.STRING_FORMATTING,
)
def checkers_for_regular_type(
self, conv_type: str, context: Context, expr: FormatStringExpr
) -> Checkers | None:
"""Returns a tuple of check functions that check whether, respectively,
a node or a type is compatible with 'type'. Return None in case of an error.
"""
expected_type = self.conversion_type(conv_type, context, expr)
if expected_type is None:
return None
def check_type(typ: Type) -> bool:
assert expected_type is not None
ret = self.check_placeholder_type(typ, expected_type, context)
if ret and conv_type == "s":
ret = self.check_s_special_cases(expr, typ, context)
return ret
def check_expr(expr: Expression) -> None:
type = self.accept(expr, expected_type)
check_type(type)
return check_expr, check_type
def check_s_special_cases(self, expr: FormatStringExpr, typ: Type, context: Context) -> bool:
"""Additional special cases for %s in bytes vs string context."""
if isinstance(expr, StrExpr):
# Couple special cases for string formatting.
if has_type_component(typ, "builtins.bytes"):
self.msg.fail(
'If x = b\'abc\' then "%s" % x produces "b\'abc\'", not "abc". '
'If this is desired behavior use "%r" % x. Otherwise, decode the bytes',
context,
code=codes.STR_BYTES_PY3,
)
return False
if isinstance(expr, BytesExpr):
# A special case for bytes formatting: b'%s' actually requires bytes on Python 3.
if has_type_component(typ, "builtins.str"):
self.msg.fail(
"On Python 3 b'%s' requires bytes, not string",
context,
code=codes.STRING_FORMATTING,
)
return False
return True
def checkers_for_c_type(
self, type: str, context: Context, format_expr: FormatStringExpr
) -> Checkers | None:
"""Returns a tuple of check functions that check whether, respectively,
a node or a type is compatible with 'type' that is a character type.
"""
expected_type = self.conversion_type(type, context, format_expr)
if expected_type is None:
return None
def check_type(type: Type) -> bool:
assert expected_type is not None
if isinstance(format_expr, BytesExpr):
err_msg = '"%c" requires an integer in range(256) or a single byte'
else:
err_msg = '"%c" requires int or char'
return self.chk.check_subtype(
type,
expected_type,
context,
err_msg,
"expression has type",
code=codes.STRING_FORMATTING,
)
def check_expr(expr: Expression) -> None:
"""int, or str with length 1"""
type = self.accept(expr, expected_type)
            # We need a further check on expr to make sure that
            # it has exactly one char or a single byte.
if check_type(type):
# Python 3 doesn't support b'%c' % str
if (
isinstance(format_expr, BytesExpr)
and isinstance(expr, BytesExpr)
and len(expr.value) != 1
):
self.msg.requires_int_or_single_byte(context)
elif isinstance(expr, (StrExpr, BytesExpr)) and len(expr.value) != 1:
self.msg.requires_int_or_char(context)
return check_expr, check_type
def conversion_type(
self, p: str, context: Context, expr: FormatStringExpr, format_call: bool = False
) -> Type | None:
"""Return the type that is accepted for a string interpolation conversion specifier type.
        Note that both Python's float (e.g. %f) and integer (e.g. %d)
        specifier types accept both floats and integers.
        The 'format_call' argument indicates whether this type came from % interpolation or from
        a str.format() call; the meaning of a few formatting types differs between the two.
"""
NUMERIC_TYPES = NUMERIC_TYPES_NEW if format_call else NUMERIC_TYPES_OLD
INT_TYPES = REQUIRE_INT_NEW if format_call else REQUIRE_INT_OLD
if p == "b" and not format_call:
if not isinstance(expr, BytesExpr):
self.msg.fail(
'Format character "b" is only supported on bytes patterns',
context,
code=codes.STRING_FORMATTING,
)
return None
return self.named_type("builtins.bytes")
elif p == "a":
# TODO: return type object?
return AnyType(TypeOfAny.special_form)
elif p in ["s", "r"]:
return AnyType(TypeOfAny.special_form)
elif p in NUMERIC_TYPES:
if p in INT_TYPES:
numeric_types = [self.named_type("builtins.int")]
else:
numeric_types = [
self.named_type("builtins.int"),
self.named_type("builtins.float"),
]
if not format_call:
if p in FLOAT_TYPES:
numeric_types.append(self.named_type("typing.SupportsFloat"))
else:
numeric_types.append(self.named_type("typing.SupportsInt"))
return UnionType.make_union(numeric_types)
elif p in ["c"]:
if isinstance(expr, BytesExpr):
return UnionType(
[self.named_type("builtins.int"), self.named_type("builtins.bytes")]
)
else:
return UnionType(
[self.named_type("builtins.int"), self.named_type("builtins.str")]
)
else:
self.msg.unsupported_placeholder(p, context)
return None
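    # Illustrative sketch (not part of the vendored mypy source): for % interpolation
    # (format_call=False), "d" maps to Union[int, float, SupportsInt] and "x" to
    # Union[int, SupportsInt], while "s"/"r" accept Any; for str.format() the SupportsInt/
    # SupportsFloat fallbacks are omitted, so "{:d}" requires an actual int.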
#
# Helpers
#
def named_type(self, name: str) -> Instance:
"""Return an instance type with type given by the name and no type
arguments. Alias for TypeChecker.named_type.
"""
return self.chk.named_type(name)
def accept(self, expr: Expression, context: Type | None = None) -> Type:
"""Type check a node. Alias for TypeChecker.accept."""
return self.chk.expr_checker.accept(expr, context)
def has_type_component(typ: Type, fullname: str) -> bool:
"""Is this a specific instance type, or a union that contains it?
We use this ad-hoc function instead of a proper visitor or subtype check
because some str vs bytes errors are strictly speaking not runtime errors,
but rather highly counter-intuitive behavior. This is similar to what is used for
--strict-equality.
"""
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return typ.type.has_base(fullname)
elif isinstance(typ, TypeVarType):
return has_type_component(typ.upper_bound, fullname) or any(
has_type_component(v, fullname) for v in typ.values
)
elif isinstance(typ, UnionType):
return any(has_type_component(t, fullname) for t in typ.relevant_items())
return False
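# Illustrative sketch (not part of the vendored mypy source): has_type_component answers
# questions like "could this value be bytes?", e.g. it returns True for Union[str, bytes]
# with fullname "builtins.bytes", and also looks through TypeVar bounds and values.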
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/checkstrformat.py
|
Python
|
NOASSERTION
| 46,273 |
from __future__ import annotations
import argparse
import configparser
import glob as fileglob
import os
import re
import sys
from io import StringIO
from mypy.errorcodes import error_codes
if sys.version_info >= (3, 11):
import tomllib
else:
import tomli as tomllib
from typing import (
Any,
Callable,
Dict,
Final,
Iterable,
List,
Mapping,
MutableMapping,
Sequence,
TextIO,
Tuple,
Union,
)
from typing_extensions import TypeAlias as _TypeAlias
from mypy import defaults
from mypy.options import PER_MODULE_OPTIONS, Options
_CONFIG_VALUE_TYPES: _TypeAlias = Union[
str, bool, int, float, Dict[str, str], List[str], Tuple[int, int]
]
_INI_PARSER_CALLABLE: _TypeAlias = Callable[[Any], _CONFIG_VALUE_TYPES]
def parse_version(v: str | float) -> tuple[int, int]:
m = re.match(r"\A(\d)\.(\d+)\Z", str(v))
if not m:
raise argparse.ArgumentTypeError(f"Invalid python version '{v}' (expected format: 'x.y')")
major, minor = int(m.group(1)), int(m.group(2))
if major == 2 and minor == 7:
pass # Error raised elsewhere
elif major == 3:
if minor < defaults.PYTHON3_VERSION_MIN[1]:
msg = "Python 3.{} is not supported (must be {}.{} or higher)".format(
minor, *defaults.PYTHON3_VERSION_MIN
)
if isinstance(v, float):
msg += ". You may need to put quotes around your Python version"
raise argparse.ArgumentTypeError(msg)
else:
raise argparse.ArgumentTypeError(
f"Python major version '{major}' out of range (must be 3)"
)
return major, minor
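# Illustrative sketch (not part of the vendored mypy source): parse_version("3.12")
# returns (3, 12). An unquoted TOML value such as python_version = 3.10 arrives here as
# the float 3.1, which is why the error message above suggests quoting the version.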
def try_split(v: str | Sequence[str], split_regex: str = "[,]") -> list[str]:
"""Split and trim a str or list of str into a list of str"""
if isinstance(v, str):
return [p.strip() for p in re.split(split_regex, v)]
return [p.strip() for p in v]
def validate_codes(codes: list[str]) -> list[str]:
invalid_codes = set(codes) - set(error_codes.keys())
if invalid_codes:
raise argparse.ArgumentTypeError(
f"Invalid error code(s): {', '.join(sorted(invalid_codes))}"
)
return codes
def validate_package_allow_list(allow_list: list[str]) -> list[str]:
for p in allow_list:
msg = f"Invalid allow list entry: {p}"
if "*" in p:
raise argparse.ArgumentTypeError(
f"{msg} (entries are already prefixes so must not contain *)"
)
if "\\" in p or "/" in p:
raise argparse.ArgumentTypeError(
f"{msg} (entries must be packages like foo.bar not directories or files)"
)
return allow_list
def expand_path(path: str) -> str:
"""Expand the user home directory and any environment variables contained within
the provided path.
"""
return os.path.expandvars(os.path.expanduser(path))
def str_or_array_as_list(v: str | Sequence[str]) -> list[str]:
if isinstance(v, str):
return [v.strip()] if v.strip() else []
return [p.strip() for p in v if p.strip()]
def split_and_match_files_list(paths: Sequence[str]) -> list[str]:
"""Take a list of files/directories (with support for globbing through the glob library).
Where a path/glob matches no file, we still include the raw path in the resulting list.
Returns a list of file paths
"""
expanded_paths = []
for path in paths:
path = expand_path(path.strip())
globbed_files = fileglob.glob(path, recursive=True)
if globbed_files:
expanded_paths.extend(globbed_files)
else:
expanded_paths.append(path)
return expanded_paths
def split_and_match_files(paths: str) -> list[str]:
"""Take a string representing a list of files/directories (with support for globbing
through the glob library).
Where a path/glob matches no file, we still include the raw path in the resulting list.
Returns a list of file paths
"""
return split_and_match_files_list(paths.split(","))
def check_follow_imports(choice: str) -> str:
choices = ["normal", "silent", "skip", "error"]
if choice not in choices:
raise argparse.ArgumentTypeError(
"invalid choice '{}' (choose from {})".format(
choice, ", ".join(f"'{x}'" for x in choices)
)
)
return choice
def check_junit_format(choice: str) -> str:
choices = ["global", "per_file"]
if choice not in choices:
raise argparse.ArgumentTypeError(
"invalid choice '{}' (choose from {})".format(
choice, ", ".join(f"'{x}'" for x in choices)
)
)
return choice
def split_commas(value: str) -> list[str]:
    # Uses a slightly smarter technique that allows a trailing comma
    # and removes the resulting empty `""` item from the split.
items = value.split(",")
if items and items[-1] == "":
items.pop(-1)
return items
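# Illustrative sketch (not part of the vendored mypy source): split_commas("a,b,") returns
# ["a", "b"], dropping only the final empty item, while split_commas("a,,b") keeps the
# interior empty string and returns ["a", "", "b"].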
# For most options, the type of the default value set in options.py is
# sufficient, and we don't have to do anything here. This table
# exists to specify types for values initialized to None or container
# types.
ini_config_types: Final[dict[str, _INI_PARSER_CALLABLE]] = {
"python_version": parse_version,
"custom_typing_module": str,
"custom_typeshed_dir": expand_path,
"mypy_path": lambda s: [expand_path(p.strip()) for p in re.split("[,:]", s)],
"files": split_and_match_files,
"quickstart_file": expand_path,
"junit_xml": expand_path,
"junit_format": check_junit_format,
"follow_imports": check_follow_imports,
"no_site_packages": bool,
"plugins": lambda s: [p.strip() for p in split_commas(s)],
"always_true": lambda s: [p.strip() for p in split_commas(s)],
"always_false": lambda s: [p.strip() for p in split_commas(s)],
"untyped_calls_exclude": lambda s: validate_package_allow_list(
[p.strip() for p in split_commas(s)]
),
"enable_incomplete_feature": lambda s: [p.strip() for p in split_commas(s)],
"disable_error_code": lambda s: validate_codes([p.strip() for p in split_commas(s)]),
"enable_error_code": lambda s: validate_codes([p.strip() for p in split_commas(s)]),
"package_root": lambda s: [p.strip() for p in split_commas(s)],
"cache_dir": expand_path,
"python_executable": expand_path,
"strict": bool,
"exclude": lambda s: [s.strip()],
"packages": try_split,
"modules": try_split,
}
# Reuse ini_config_types and overwrite only the entries that differ
toml_config_types: Final[dict[str, _INI_PARSER_CALLABLE]] = ini_config_types.copy()
toml_config_types.update(
{
"python_version": parse_version,
"mypy_path": lambda s: [expand_path(p) for p in try_split(s, "[,:]")],
"files": lambda s: split_and_match_files_list(try_split(s)),
"junit_format": lambda s: check_junit_format(str(s)),
"follow_imports": lambda s: check_follow_imports(str(s)),
"plugins": try_split,
"always_true": try_split,
"always_false": try_split,
"untyped_calls_exclude": lambda s: validate_package_allow_list(try_split(s)),
"enable_incomplete_feature": try_split,
"disable_error_code": lambda s: validate_codes(try_split(s)),
"enable_error_code": lambda s: validate_codes(try_split(s)),
"package_root": try_split,
"exclude": str_or_array_as_list,
"packages": try_split,
"modules": try_split,
}
)
def parse_config_file(
options: Options,
set_strict_flags: Callable[[], None],
filename: str | None,
stdout: TextIO | None = None,
stderr: TextIO | None = None,
) -> None:
"""Parse a config file into an Options object.
Errors are written to stderr but are not fatal.
If filename is None, fall back to default config files.
"""
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
if filename is not None:
config_files: tuple[str, ...] = (filename,)
else:
config_files_iter: Iterable[str] = map(os.path.expanduser, defaults.CONFIG_FILES)
config_files = tuple(config_files_iter)
config_parser = configparser.RawConfigParser()
for config_file in config_files:
if not os.path.exists(config_file):
continue
try:
if is_toml(config_file):
with open(config_file, "rb") as f:
toml_data = tomllib.load(f)
# Filter down to just mypy relevant toml keys
toml_data = toml_data.get("tool", {})
if "mypy" not in toml_data:
continue
toml_data = {"mypy": toml_data["mypy"]}
parser: MutableMapping[str, Any] = destructure_overrides(toml_data)
config_types = toml_config_types
else:
config_parser.read(config_file)
parser = config_parser
config_types = ini_config_types
except (tomllib.TOMLDecodeError, configparser.Error, ConfigTOMLValueError) as err:
print(f"{config_file}: {err}", file=stderr)
else:
if config_file in defaults.SHARED_CONFIG_FILES and "mypy" not in parser:
continue
file_read = config_file
options.config_file = file_read
break
else:
return
os.environ["MYPY_CONFIG_FILE_DIR"] = os.path.dirname(os.path.abspath(config_file))
if "mypy" not in parser:
if filename or file_read not in defaults.SHARED_CONFIG_FILES:
print(f"{file_read}: No [mypy] section in config file", file=stderr)
else:
section = parser["mypy"]
prefix = f"{file_read}: [mypy]: "
updates, report_dirs = parse_section(
prefix, options, set_strict_flags, section, config_types, stderr
)
for k, v in updates.items():
setattr(options, k, v)
options.report_dirs.update(report_dirs)
for name, section in parser.items():
if name.startswith("mypy-"):
prefix = get_prefix(file_read, name)
updates, report_dirs = parse_section(
prefix, options, set_strict_flags, section, config_types, stderr
)
if report_dirs:
print(
prefix,
"Per-module sections should not specify reports ({})".format(
", ".join(s + "_report" for s in sorted(report_dirs))
),
file=stderr,
)
if set(updates) - PER_MODULE_OPTIONS:
print(
prefix,
"Per-module sections should only specify per-module flags ({})".format(
", ".join(sorted(set(updates) - PER_MODULE_OPTIONS))
),
file=stderr,
)
updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
globs = name[5:]
for glob in globs.split(","):
# For backwards compatibility, replace (back)slashes with dots.
glob = glob.replace(os.sep, ".")
if os.altsep:
glob = glob.replace(os.altsep, ".")
if any(c in glob for c in "?[]!") or any(
"*" in x and x != "*" for x in glob.split(".")
):
print(
prefix,
"Patterns must be fully-qualified module names, optionally "
"with '*' in some components (e.g spam.*.eggs.*)",
file=stderr,
)
else:
options.per_module_options[glob] = updates
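# Illustrative sketch (not part of the vendored mypy source): parse_config_file applies the
# [mypy] section of the first existing config file globally, then stores sections such as
# [mypy-somepkg.*] in options.per_module_options under the glob "somepkg.*"; patterns using
# "?", "[", "]" or "!" (e.g. [mypy-somepkg.uti?s]) are rejected, since only "*" components
# are allowed.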
def get_prefix(file_read: str, name: str) -> str:
if is_toml(file_read):
module_name_str = 'module = "%s"' % "-".join(name.split("-")[1:])
else:
module_name_str = name
return f"{file_read}: [{module_name_str}]:"
def is_toml(filename: str) -> bool:
return filename.lower().endswith(".toml")
def destructure_overrides(toml_data: dict[str, Any]) -> dict[str, Any]:
"""Take the new [[tool.mypy.overrides]] section array in the pyproject.toml file,
and convert it back to a flatter structure that the existing config_parser can handle.
E.g. the following pyproject.toml file:
[[tool.mypy.overrides]]
module = [
"a.b",
"b.*"
]
disallow_untyped_defs = true
[[tool.mypy.overrides]]
module = 'c'
disallow_untyped_defs = false
Would map to the following config dict that it would have gotten from parsing an equivalent
ini file:
{
"mypy-a.b": {
disallow_untyped_defs = true,
},
"mypy-b.*": {
disallow_untyped_defs = true,
},
"mypy-c": {
disallow_untyped_defs: false,
},
}
"""
if "overrides" not in toml_data["mypy"]:
return toml_data
if not isinstance(toml_data["mypy"]["overrides"], list):
raise ConfigTOMLValueError(
"tool.mypy.overrides sections must be an array. Please make "
"sure you are using double brackets like so: [[tool.mypy.overrides]]"
)
result = toml_data.copy()
for override in result["mypy"]["overrides"]:
if "module" not in override:
raise ConfigTOMLValueError(
"toml config file contains a [[tool.mypy.overrides]] "
"section, but no module to override was specified."
)
if isinstance(override["module"], str):
modules = [override["module"]]
elif isinstance(override["module"], list):
modules = override["module"]
else:
raise ConfigTOMLValueError(
"toml config file contains a [[tool.mypy.overrides]] "
"section with a module value that is not a string or a list of "
"strings"
)
for module in modules:
module_overrides = override.copy()
del module_overrides["module"]
old_config_name = f"mypy-{module}"
if old_config_name not in result:
result[old_config_name] = module_overrides
else:
for new_key, new_value in module_overrides.items():
if (
new_key in result[old_config_name]
and result[old_config_name][new_key] != new_value
):
raise ConfigTOMLValueError(
"toml config file contains "
"[[tool.mypy.overrides]] sections with conflicting "
f"values. Module '{module}' has two different values for '{new_key}'"
)
result[old_config_name][new_key] = new_value
del result["mypy"]["overrides"]
return result
def parse_section(
prefix: str,
template: Options,
set_strict_flags: Callable[[], None],
section: Mapping[str, Any],
config_types: dict[str, Any],
stderr: TextIO = sys.stderr,
) -> tuple[dict[str, object], dict[str, str]]:
"""Parse one section of a config file.
Returns a dict of option values encountered, and a dict of report directories.
"""
results: dict[str, object] = {}
report_dirs: dict[str, str] = {}
# Because these fields exist on Options, without proactive checking, we would accept them
# and crash later
invalid_options = {
"enabled_error_codes": "enable_error_code",
"disabled_error_codes": "disable_error_code",
}
for key in section:
invert = False
options_key = key
if key in config_types:
ct = config_types[key]
elif key in invalid_options:
print(
f"{prefix}Unrecognized option: {key} = {section[key]}"
f" (did you mean {invalid_options[key]}?)",
file=stderr,
)
continue
else:
dv = None
# We have to keep new_semantic_analyzer in Options
# for plugin compatibility but it is not a valid option anymore.
assert hasattr(template, "new_semantic_analyzer")
if key != "new_semantic_analyzer":
dv = getattr(template, key, None)
if dv is None:
if key.endswith("_report"):
report_type = key[:-7].replace("_", "-")
if report_type in defaults.REPORTER_NAMES:
report_dirs[report_type] = str(section[key])
else:
print(f"{prefix}Unrecognized report type: {key}", file=stderr)
continue
if key.startswith("x_"):
pass # Don't complain about `x_blah` flags
elif key.startswith("no_") and hasattr(template, key[3:]):
options_key = key[3:]
invert = True
elif key.startswith("allow") and hasattr(template, "dis" + key):
options_key = "dis" + key
invert = True
elif key.startswith("disallow") and hasattr(template, key[3:]):
options_key = key[3:]
invert = True
elif key.startswith("show_") and hasattr(template, "hide_" + key[5:]):
options_key = "hide_" + key[5:]
invert = True
elif key == "strict":
pass # Special handling below
else:
print(f"{prefix}Unrecognized option: {key} = {section[key]}", file=stderr)
if invert:
dv = getattr(template, options_key, None)
else:
continue
ct = type(dv)
v: Any = None
try:
if ct is bool:
if isinstance(section, dict):
v = convert_to_boolean(section.get(key))
else:
v = section.getboolean(key) # type: ignore[attr-defined] # Until better stub
if invert:
v = not v
elif callable(ct):
if invert:
print(f"{prefix}Can not invert non-boolean key {options_key}", file=stderr)
continue
try:
v = ct(section.get(key))
except argparse.ArgumentTypeError as err:
print(f"{prefix}{key}: {err}", file=stderr)
continue
else:
print(f"{prefix}Don't know what type {key} should have", file=stderr)
continue
except ValueError as err:
print(f"{prefix}{key}: {err}", file=stderr)
continue
if key == "strict":
if v:
set_strict_flags()
continue
results[options_key] = v
# These two flags act as per-module overrides, so store the empty defaults.
if "disable_error_code" not in results:
results["disable_error_code"] = []
if "enable_error_code" not in results:
results["enable_error_code"] = []
return results, report_dirs
def convert_to_boolean(value: Any | None) -> bool:
"""Return a boolean value translating from other types if necessary."""
if isinstance(value, bool):
return value
if not isinstance(value, str):
value = str(value)
if value.lower() not in configparser.RawConfigParser.BOOLEAN_STATES:
raise ValueError(f"Not a boolean: {value}")
return configparser.RawConfigParser.BOOLEAN_STATES[value.lower()]
def split_directive(s: str) -> tuple[list[str], list[str]]:
"""Split s on commas, except during quoted sections.
Returns the parts and a list of error messages."""
parts = []
cur: list[str] = []
errors = []
i = 0
while i < len(s):
if s[i] == ",":
parts.append("".join(cur).strip())
cur = []
elif s[i] == '"':
i += 1
while i < len(s) and s[i] != '"':
cur.append(s[i])
i += 1
if i == len(s):
errors.append("Unterminated quote in configuration comment")
cur.clear()
else:
cur.append(s[i])
i += 1
if cur:
parts.append("".join(cur).strip())
return parts, errors
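# Illustrative sketch (not part of the vendored mypy source): split_directive splits only on
# top-level commas, so 'allow-untyped-defs, enable-error-code="truthy-bool,ignore"' yields
# ['allow-untyped-defs', 'enable-error-code=truthy-bool,ignore'] with no errors, while an
# unterminated quote drops that chunk and records an error message instead.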
def mypy_comments_to_config_map(line: str, template: Options) -> tuple[dict[str, str], list[str]]:
"""Rewrite the mypy comment syntax into ini file syntax."""
options = {}
entries, errors = split_directive(line)
for entry in entries:
if "=" not in entry:
name = entry
value = None
else:
name, value = (x.strip() for x in entry.split("=", 1))
name = name.replace("-", "_")
if value is None:
value = "True"
options[name] = value
return options, errors
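# Illustrative sketch (not part of the vendored mypy source): for the directive text
# "disallow-untyped-defs, strict-optional=False" taken from an inline "# mypy:" comment,
# this returns {"disallow_untyped_defs": "True", "strict_optional": "False"} -- bare flags
# default to "True" and dashes are normalized to underscores before parse_section runs.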
def parse_mypy_comments(
args: list[tuple[int, str]], template: Options
) -> tuple[dict[str, object], list[tuple[int, str]]]:
"""Parse a collection of inline mypy: configuration comments.
Returns a dictionary of options to be applied and a list of error messages
generated.
"""
errors: list[tuple[int, str]] = []
sections = {}
for lineno, line in args:
# In order to easily match the behavior for bools, we abuse configparser.
# Oddly, the only way to get the SectionProxy object with the getboolean
# method is to create a config parser.
parser = configparser.RawConfigParser()
options, parse_errors = mypy_comments_to_config_map(line, template)
parser["dummy"] = options
errors.extend((lineno, x) for x in parse_errors)
stderr = StringIO()
strict_found = False
def set_strict_flags() -> None:
nonlocal strict_found
strict_found = True
new_sections, reports = parse_section(
"", template, set_strict_flags, parser["dummy"], ini_config_types, stderr=stderr
)
errors.extend((lineno, x) for x in stderr.getvalue().strip().split("\n") if x)
if reports:
errors.append((lineno, "Reports not supported in inline configuration"))
if strict_found:
errors.append(
(
lineno,
'Setting "strict" not supported in inline configuration: specify it in '
"a configuration file instead, or set individual inline flags "
'(see "mypy -h" for the list of flags enabled in strict mode)',
)
)
sections.update(new_sections)
return sections, errors
def get_config_module_names(filename: str | None, modules: list[str]) -> str:
if not filename or not modules:
return ""
if not is_toml(filename):
return ", ".join(f"[mypy-{module}]" for module in modules)
return "module = ['%s']" % ("', '".join(sorted(modules)))
class ConfigTOMLValueError(ValueError):
pass
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/config_parser.py
|
Python
|
NOASSERTION
| 23,389 |
"""Constant folding of expressions.
For example, 3 + 5 can be constant folded into 8.
"""
from __future__ import annotations
from typing import Final, Union
from mypy.nodes import (
ComplexExpr,
Expression,
FloatExpr,
IntExpr,
NameExpr,
OpExpr,
StrExpr,
UnaryExpr,
Var,
)
# All possible result types of constant folding
ConstantValue = Union[int, bool, float, complex, str]
CONST_TYPES: Final = (int, bool, float, complex, str)
def constant_fold_expr(expr: Expression, cur_mod_id: str) -> ConstantValue | None:
"""Return the constant value of an expression for supported operations.
Among other things, support int arithmetic and string
concatenation. For example, the expression 3 + 5 has the constant
value 8.
Also bind simple references to final constants defined in the
current module (cur_mod_id). Binding to references is best effort
-- we don't bind references to other modules. Mypyc trusts these
to be correct in compiled modules, so that it can replace a
constant expression (or a reference to one) with the statically
computed value. We don't want to infer constant values based on
stubs, in particular, as these might not match the implementation
(due to version skew, for example).
Return None if unsuccessful.
"""
if isinstance(expr, IntExpr):
return expr.value
if isinstance(expr, StrExpr):
return expr.value
if isinstance(expr, FloatExpr):
return expr.value
if isinstance(expr, ComplexExpr):
return expr.value
elif isinstance(expr, NameExpr):
if expr.name == "True":
return True
elif expr.name == "False":
return False
node = expr.node
if (
isinstance(node, Var)
and node.is_final
and node.fullname.rsplit(".", 1)[0] == cur_mod_id
):
value = node.final_value
            if isinstance(value, CONST_TYPES):
return value
elif isinstance(expr, OpExpr):
left = constant_fold_expr(expr.left, cur_mod_id)
right = constant_fold_expr(expr.right, cur_mod_id)
if left is not None and right is not None:
return constant_fold_binary_op(expr.op, left, right)
elif isinstance(expr, UnaryExpr):
value = constant_fold_expr(expr.expr, cur_mod_id)
if value is not None:
return constant_fold_unary_op(expr.op, value)
return None
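# Illustrative sketch (not part of the vendored mypy source): for an OpExpr representing the
# literal expression 3 + 5, constant_fold_expr returns 8; for a NameExpr bound to a Final int
# defined in cur_mod_id it returns that final value; otherwise it returns None and the caller
# falls back to normal, non-constant handling.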
def constant_fold_binary_op(
op: str, left: ConstantValue, right: ConstantValue
) -> ConstantValue | None:
if isinstance(left, int) and isinstance(right, int):
return constant_fold_binary_int_op(op, left, right)
# Float and mixed int/float arithmetic.
if isinstance(left, float) and isinstance(right, float):
return constant_fold_binary_float_op(op, left, right)
elif isinstance(left, float) and isinstance(right, int):
return constant_fold_binary_float_op(op, left, right)
elif isinstance(left, int) and isinstance(right, float):
return constant_fold_binary_float_op(op, left, right)
# String concatenation and multiplication.
if op == "+" and isinstance(left, str) and isinstance(right, str):
return left + right
elif op == "*" and isinstance(left, str) and isinstance(right, int):
return left * right
elif op == "*" and isinstance(left, int) and isinstance(right, str):
return left * right
# Complex construction.
if op == "+" and isinstance(left, (int, float)) and isinstance(right, complex):
return left + right
elif op == "+" and isinstance(left, complex) and isinstance(right, (int, float)):
return left + right
elif op == "-" and isinstance(left, (int, float)) and isinstance(right, complex):
return left - right
elif op == "-" and isinstance(left, complex) and isinstance(right, (int, float)):
return left - right
return None
def constant_fold_binary_int_op(op: str, left: int, right: int) -> int | float | None:
if op == "+":
return left + right
if op == "-":
return left - right
elif op == "*":
return left * right
elif op == "/":
if right != 0:
return left / right
elif op == "//":
if right != 0:
return left // right
elif op == "%":
if right != 0:
return left % right
elif op == "&":
return left & right
elif op == "|":
return left | right
elif op == "^":
return left ^ right
elif op == "<<":
if right >= 0:
return left << right
elif op == ">>":
if right >= 0:
return left >> right
elif op == "**":
if right >= 0:
ret = left**right
assert isinstance(ret, int)
return ret
return None
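# Illustrative sketch (not part of the vendored mypy source): integer folding mirrors Python
# semantics where defined, e.g. constant_fold_binary_int_op("**", 2, 10) == 1024 and
# constant_fold_binary_int_op("//", 7, 2) == 3, but division or modulo by zero and negative
# shift amounts yield None rather than raising.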
def constant_fold_binary_float_op(op: str, left: int | float, right: int | float) -> float | None:
assert not (isinstance(left, int) and isinstance(right, int)), (op, left, right)
if op == "+":
return left + right
elif op == "-":
return left - right
elif op == "*":
return left * right
elif op == "/":
if right != 0:
return left / right
elif op == "//":
if right != 0:
return left // right
elif op == "%":
if right != 0:
return left % right
elif op == "**":
if (left < 0 and isinstance(right, int)) or left > 0:
try:
ret = left**right
except OverflowError:
return None
else:
assert isinstance(ret, float), ret
return ret
return None
def constant_fold_unary_op(op: str, value: ConstantValue) -> int | float | None:
if op == "-" and isinstance(value, (int, float)):
return -value
elif op == "~" and isinstance(value, int):
return ~value
elif op == "+" and isinstance(value, (int, float)):
return value
return None
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/constant_fold.py
|
Python
|
NOASSERTION
| 6,071 |
"""Type inference constraints."""
from __future__ import annotations
from typing import TYPE_CHECKING, Final, Iterable, List, Sequence
import mypy.subtypes
import mypy.typeops
from mypy.argmap import ArgTypeExpander
from mypy.erasetype import erase_typevars
from mypy.maptype import map_instance_to_supertype
from mypy.nodes import (
ARG_OPT,
ARG_POS,
ARG_STAR,
ARG_STAR2,
CONTRAVARIANT,
COVARIANT,
ArgKind,
TypeInfo,
)
from mypy.types import (
TUPLE_LIKE_INSTANCE_NAMES,
AnyType,
CallableType,
DeletedType,
ErasedType,
Instance,
LiteralType,
NoneType,
NormalizedCallableType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeQuery,
TypeType,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
TypeVisitor,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
find_unpack_in_list,
get_proper_type,
has_recursive_types,
has_type_vars,
is_named_instance,
split_with_prefix_and_suffix,
)
from mypy.types_utils import is_union_with_any
from mypy.typestate import type_state
if TYPE_CHECKING:
from mypy.infer import ArgumentInferContext
SUBTYPE_OF: Final = 0
SUPERTYPE_OF: Final = 1
class Constraint:
"""A representation of a type constraint.
It can be either T <: type or T :> type (T is a type variable).
"""
type_var: TypeVarId
op = 0 # SUBTYPE_OF or SUPERTYPE_OF
target: Type
def __init__(self, type_var: TypeVarLikeType, op: int, target: Type) -> None:
self.type_var = type_var.id
self.op = op
# TODO: should we add "assert not isinstance(target, UnpackType)"?
# UnpackType is a synthetic type, and is never valid as a constraint target.
self.target = target
self.origin_type_var = type_var
# These are additional type variables that should be solved for together with type_var.
# TODO: A cleaner solution may be to modify the return type of infer_constraints()
# to include these instead, but this is a rather big refactoring.
self.extra_tvars: list[TypeVarLikeType] = []
def __repr__(self) -> str:
op_str = "<:"
if self.op == SUPERTYPE_OF:
op_str = ":>"
return f"{self.type_var} {op_str} {self.target}"
def __hash__(self) -> int:
return hash((self.type_var, self.op, self.target))
def __eq__(self, other: object) -> bool:
if not isinstance(other, Constraint):
return False
return (self.type_var, self.op, self.target) == (other.type_var, other.op, other.target)
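# Illustrative sketch (not part of the vendored mypy source): a Constraint records one bound
# on a type variable, e.g. Constraint(T, SUPERTYPE_OF, int_type) means "any solution for T
# must be a supertype of int" and its __repr__ renders roughly as "T :> builtins.int".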
def infer_constraints_for_callable(
callee: CallableType,
arg_types: Sequence[Type | None],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
formal_to_actual: list[list[int]],
context: ArgumentInferContext,
) -> list[Constraint]:
"""Infer type variable constraints for a callable and actual arguments.
Return a list of constraints.
"""
constraints: list[Constraint] = []
mapper = ArgTypeExpander(context)
param_spec = callee.param_spec()
param_spec_arg_types = []
param_spec_arg_names = []
param_spec_arg_kinds = []
incomplete_star_mapping = False
for i, actuals in enumerate(formal_to_actual):
for actual in actuals:
if actual is None and callee.arg_kinds[i] in (ARG_STAR, ARG_STAR2):
                # We can't use arguments to infer a ParamSpec constraint if only some
                # are present in the current inference pass.
incomplete_star_mapping = True
break
for i, actuals in enumerate(formal_to_actual):
if isinstance(callee.arg_types[i], UnpackType):
unpack_type = callee.arg_types[i]
assert isinstance(unpack_type, UnpackType)
# In this case we are binding all the actuals to *args,
# and we want a constraint that the typevar tuple being unpacked
# is equal to a type list of all the actuals.
actual_types = []
unpacked_type = get_proper_type(unpack_type.type)
if isinstance(unpacked_type, TypeVarTupleType):
tuple_instance = unpacked_type.tuple_fallback
elif isinstance(unpacked_type, TupleType):
tuple_instance = unpacked_type.partial_fallback
else:
assert False, "mypy bug: unhandled constraint inference case"
for actual in actuals:
actual_arg_type = arg_types[actual]
if actual_arg_type is None:
continue
expanded_actual = mapper.expand_actual_type(
actual_arg_type,
arg_kinds[actual],
callee.arg_names[i],
callee.arg_kinds[i],
allow_unpack=True,
)
if arg_kinds[actual] != ARG_STAR or isinstance(
get_proper_type(actual_arg_type), TupleType
):
actual_types.append(expanded_actual)
else:
# If we are expanding an iterable inside * actual, append a homogeneous item instead
actual_types.append(
UnpackType(tuple_instance.copy_modified(args=[expanded_actual]))
)
if isinstance(unpacked_type, TypeVarTupleType):
constraints.append(
Constraint(
unpacked_type,
SUPERTYPE_OF,
TupleType(actual_types, unpacked_type.tuple_fallback),
)
)
elif isinstance(unpacked_type, TupleType):
# Prefixes get converted to positional args, so technically the only case we
# should have here is like Tuple[Unpack[Ts], Y1, Y2, Y3]. If this turns out
# not to hold we can always handle the prefixes too.
inner_unpack = unpacked_type.items[0]
assert isinstance(inner_unpack, UnpackType)
inner_unpacked_type = get_proper_type(inner_unpack.type)
suffix_len = len(unpacked_type.items) - 1
if isinstance(inner_unpacked_type, TypeVarTupleType):
# Variadic item can be either *Ts...
constraints.append(
Constraint(
inner_unpacked_type,
SUPERTYPE_OF,
TupleType(
actual_types[:-suffix_len], inner_unpacked_type.tuple_fallback
),
)
)
else:
# ...or it can be a homogeneous tuple.
assert (
isinstance(inner_unpacked_type, Instance)
and inner_unpacked_type.type.fullname == "builtins.tuple"
)
for at in actual_types[:-suffix_len]:
constraints.extend(
infer_constraints(inner_unpacked_type.args[0], at, SUPERTYPE_OF)
)
# Now handle the suffix (if any).
if suffix_len:
for tt, at in zip(unpacked_type.items[1:], actual_types[-suffix_len:]):
constraints.extend(infer_constraints(tt, at, SUPERTYPE_OF))
else:
assert False, "mypy bug: unhandled constraint inference case"
else:
for actual in actuals:
actual_arg_type = arg_types[actual]
if actual_arg_type is None:
continue
if param_spec and callee.arg_kinds[i] in (ARG_STAR, ARG_STAR2):
# If actual arguments are mapped to ParamSpec type, we can't infer individual
# constraints, instead store them and infer single constraint at the end.
# It is impossible to map actual kind to formal kind, so use some heuristic.
# This inference is used as a fallback, so relying on heuristic should be OK.
if not incomplete_star_mapping:
param_spec_arg_types.append(
mapper.expand_actual_type(
actual_arg_type, arg_kinds[actual], None, arg_kinds[actual]
)
)
actual_kind = arg_kinds[actual]
param_spec_arg_kinds.append(
ARG_POS if actual_kind not in (ARG_STAR, ARG_STAR2) else actual_kind
)
param_spec_arg_names.append(arg_names[actual] if arg_names else None)
else:
actual_type = mapper.expand_actual_type(
actual_arg_type,
arg_kinds[actual],
callee.arg_names[i],
callee.arg_kinds[i],
)
c = infer_constraints(callee.arg_types[i], actual_type, SUPERTYPE_OF)
constraints.extend(c)
if (
param_spec
and not any(c.type_var == param_spec.id for c in constraints)
and not incomplete_star_mapping
):
# Use ParamSpec constraint from arguments only if there are no other constraints,
# since as explained above it is quite ad-hoc.
constraints.append(
Constraint(
param_spec,
SUPERTYPE_OF,
Parameters(
arg_types=param_spec_arg_types,
arg_kinds=param_spec_arg_kinds,
arg_names=param_spec_arg_names,
imprecise_arg_kinds=True,
),
)
)
if any(isinstance(v, ParamSpecType) for v in callee.variables):
# As a perf optimization filter imprecise constraints only when we can have them.
constraints = filter_imprecise_kinds(constraints)
return constraints
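# Rough sketch of the ParamSpec fallback above: if the callee is Callable[P, T] and no other
# constraint for P was produced, a hypothetical call f(1, "a") yields a single constraint of
# roughly the form
#   P :> Parameters(arg_types=[int, str], arg_kinds=[ARG_POS, ARG_POS], arg_names=[None, None])
# marked with imprecise_arg_kinds=True, since mapping actual argument kinds back to formal
# kinds is heuristic (see filter_imprecise_kinds at the end of this module).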
def infer_constraints(
template: Type, actual: Type, direction: int, skip_neg_op: bool = False
) -> list[Constraint]:
"""Infer type constraints.
Match a template type, which may contain type variable references,
recursively against a type which does not contain (the same) type
    variable references. The result is a list of type constraints of
form 'T is a supertype/subtype of x', where T is a type variable
present in the template and x is a type without reference to type
variables present in the template.
Assume T and S are type variables. Now the following results can be
calculated (read as '(template, actual) --> result'):
(T, X) --> T :> X
(X[T], X[Y]) --> T <: Y and T :> Y
((T, T), (X, Y)) --> T :> X and T :> Y
((T, S), (X, Y)) --> T :> X and S :> Y
(X[T], Any) --> T <: Any and T :> Any
The constraints are represented as Constraint objects. If skip_neg_op == True,
then skip adding reverse (polymorphic) constraints (since this is already a call
to infer such constraints).
"""
if any(
get_proper_type(template) == get_proper_type(t)
and get_proper_type(actual) == get_proper_type(a)
for (t, a) in reversed(type_state.inferring)
):
return []
if has_recursive_types(template) or isinstance(get_proper_type(template), Instance):
# This case requires special care because it may cause infinite recursion.
        # Note that we include Instances because they may be recursive, as in str(Sequence[str]).
if not has_type_vars(template):
# Return early on an empty branch.
return []
type_state.inferring.append((template, actual))
res = _infer_constraints(template, actual, direction, skip_neg_op)
type_state.inferring.pop()
return res
return _infer_constraints(template, actual, direction, skip_neg_op)
def _infer_constraints(
template: Type, actual: Type, direction: int, skip_neg_op: bool
) -> list[Constraint]:
orig_template = template
template = get_proper_type(template)
actual = get_proper_type(actual)
# Type inference shouldn't be affected by whether union types have been simplified.
# We however keep any ErasedType items, so that the caller will see it when using
# checkexpr.has_erased_component().
if isinstance(template, UnionType):
template = mypy.typeops.make_simplified_union(template.items, keep_erased=True)
if isinstance(actual, UnionType):
actual = mypy.typeops.make_simplified_union(actual.items, keep_erased=True)
# Ignore Any types from the type suggestion engine to avoid them
# causing us to infer Any in situations where a better job could
# be done otherwise. (This can produce false positives but that
# doesn't really matter because it is all heuristic anyway.)
if isinstance(actual, AnyType) and actual.type_of_any == TypeOfAny.suggestion_engine:
return []
# If the template is simply a type variable, emit a Constraint directly.
# We need to handle this case before handling Unions for two reasons:
# 1. "T <: Union[U1, U2]" is not equivalent to "T <: U1 or T <: U2",
# because T can itself be a union (notably, Union[U1, U2] itself).
# 2. "T :> Union[U1, U2]" is logically equivalent to "T :> U1 and
# T :> U2", but they are not equivalent to the constraint solver,
# which never introduces new Union types (it uses join() instead).
if isinstance(template, TypeVarType):
return [Constraint(template, direction, actual)]
if (
isinstance(actual, TypeVarType)
and not actual.id.is_meta_var()
and direction == SUPERTYPE_OF
):
# Unless template is also a type variable (or a union that contains one), using the upper
        # bound for inference will usually give a better result when the actual is a type variable.
if not isinstance(template, UnionType) or not any(
isinstance(t, TypeVarType) for t in template.items
):
actual = get_proper_type(actual.upper_bound)
# Now handle the case of either template or actual being a Union.
# For a Union to be a subtype of another type, every item of the Union
# must be a subtype of that type, so concatenate the constraints.
if direction == SUBTYPE_OF and isinstance(template, UnionType):
res = []
for t_item in template.items:
res.extend(infer_constraints(t_item, actual, direction))
return res
if direction == SUPERTYPE_OF and isinstance(actual, UnionType):
res = []
for a_item in actual.items:
res.extend(infer_constraints(orig_template, a_item, direction))
return res
# Now the potential subtype is known not to be a Union or a type
# variable that we are solving for. In that case, for a Union to
# be a supertype of the potential subtype, some item of the Union
# must be a supertype of it.
if direction == SUBTYPE_OF and isinstance(actual, UnionType):
        # If some of the items are not complete types, disregard those.
items = simplify_away_incomplete_types(actual.items)
# We infer constraints eagerly -- try to find constraints for a type
# variable if possible. This seems to help with some real-world
# use cases.
return any_constraints(
[infer_constraints_if_possible(template, a_item, direction) for a_item in items],
eager=True,
)
if direction == SUPERTYPE_OF and isinstance(template, UnionType):
# When the template is a union, we are okay with leaving some
# type variables indeterminate. This helps with some special
# cases, though this isn't very principled.
result = any_constraints(
[
infer_constraints_if_possible(t_item, actual, direction)
for t_item in template.items
],
eager=False,
)
if result:
return result
elif has_recursive_types(template) and not has_recursive_types(actual):
return handle_recursive_union(template, actual, direction)
return []
# Remaining cases are handled by ConstraintBuilderVisitor.
return template.accept(ConstraintBuilderVisitor(actual, direction, skip_neg_op))
def infer_constraints_if_possible(
template: Type, actual: Type, direction: int
) -> list[Constraint] | None:
"""Like infer_constraints, but return None if the input relation is
known to be unsatisfiable, for example if template=List[T] and actual=int.
(In this case infer_constraints would return [], just like it would for
an automatically satisfied relation like template=List[T] and actual=object.)
"""
if direction == SUBTYPE_OF and not mypy.subtypes.is_subtype(erase_typevars(template), actual):
return None
if direction == SUPERTYPE_OF and not mypy.subtypes.is_subtype(
actual, erase_typevars(template)
):
return None
if (
direction == SUPERTYPE_OF
and isinstance(template, TypeVarType)
and not mypy.subtypes.is_subtype(actual, erase_typevars(template.upper_bound))
):
# This is not caught by the above branch because of the erase_typevars() call,
# that would return 'Any' for a type variable.
return None
return infer_constraints(template, actual, direction)
def select_trivial(options: Sequence[list[Constraint] | None]) -> list[list[Constraint]]:
"""Select only those lists where each item is a constraint against Any."""
res = []
for option in options:
if option is None:
continue
if all(isinstance(get_proper_type(c.target), AnyType) for c in option):
res.append(option)
return res
def merge_with_any(constraint: Constraint) -> Constraint:
"""Transform a constraint target into a union with given Any type."""
target = constraint.target
if is_union_with_any(target):
# Do not produce redundant unions.
return constraint
# TODO: if we will support multiple sources Any, use this here instead.
any_type = AnyType(TypeOfAny.implementation_artifact)
return Constraint(
constraint.origin_type_var,
constraint.op,
UnionType.make_union([target, any_type], target.line, target.column),
)
def handle_recursive_union(template: UnionType, actual: Type, direction: int) -> list[Constraint]:
# This is a hack to special-case things like Union[T, Inst[T]] in recursive types. Although
# it is quite arbitrary, it is a relatively common pattern, so we should handle it well.
# This function may be called when inferring against such union resulted in different
# constraints for each item. Normally we give up in such case, but here we instead split
# the union in two parts, and try inferring sequentially.
non_type_var_items = [t for t in template.items if not isinstance(t, TypeVarType)]
type_var_items = [t for t in template.items if isinstance(t, TypeVarType)]
return infer_constraints(
UnionType.make_union(non_type_var_items), actual, direction
) or infer_constraints(UnionType.make_union(type_var_items), actual, direction)
def any_constraints(options: list[list[Constraint] | None], eager: bool) -> list[Constraint]:
"""Deduce what we can from a collection of constraint lists.
It's a given that at least one of the lists must be satisfied. A
None element in the list of options represents an unsatisfiable
constraint and is ignored. Ignore empty constraint lists if eager
is true -- they are always trivially satisfiable.
"""
if eager:
valid_options = [option for option in options if option]
else:
valid_options = [option for option in options if option is not None]
if not valid_options:
return []
if len(valid_options) == 1:
return valid_options[0]
if all(is_same_constraints(valid_options[0], c) for c in valid_options[1:]):
# Multiple sets of constraints that are all the same. Just pick any one of them.
return valid_options[0]
if all(is_similar_constraints(valid_options[0], c) for c in valid_options[1:]):
# All options have same structure. In this case we can merge-in trivial
# options (i.e. those that only have Any) and try again.
# TODO: More generally, if a given (variable, direction) pair appears in
# every option, combine the bounds with meet/join always, not just for Any.
trivial_options = select_trivial(valid_options)
if trivial_options and len(trivial_options) < len(valid_options):
merged_options = []
for option in valid_options:
if option in trivial_options:
continue
if option is not None:
merged_option: list[Constraint] | None = [merge_with_any(c) for c in option]
else:
merged_option = None
merged_options.append(merged_option)
return any_constraints(list(merged_options), eager)
    # If normal logic didn't work, try excluding trivially unsatisfiable constraints (due to
# upper bounds) from each option, and comparing them again.
filtered_options = [filter_satisfiable(o) for o in options]
if filtered_options != options:
return any_constraints(filtered_options, eager=eager)
# Otherwise, there are either no valid options or multiple, inconsistent valid
# options. Give up and deduce nothing.
return []
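# Rough examples for any_constraints() above: [[T :> int], [T :> int]] returns the first list
# (the options are the same); [[T :> int], None] with eager=False drops the unsatisfiable None
# entry and returns [T :> int]; genuinely conflicting options fall through the merging and
# filtering steps and may end up inferring nothing ([]).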
def filter_satisfiable(option: list[Constraint] | None) -> list[Constraint] | None:
"""Keep only constraints that can possibly be satisfied.
    Currently, we filter out constraints where the target is not a subtype of the upper
    bound, since those can never be satisfied. We may add more cases in the future if it
    improves type inference.
"""
if not option:
return option
satisfiable = []
for c in option:
if isinstance(c.origin_type_var, TypeVarType) and c.origin_type_var.values:
if any(
mypy.subtypes.is_subtype(c.target, value) for value in c.origin_type_var.values
):
satisfiable.append(c)
elif mypy.subtypes.is_subtype(c.target, c.origin_type_var.upper_bound):
satisfiable.append(c)
if not satisfiable:
return None
return satisfiable
def is_same_constraints(x: list[Constraint], y: list[Constraint]) -> bool:
for c1 in x:
if not any(is_same_constraint(c1, c2) for c2 in y):
return False
for c1 in y:
if not any(is_same_constraint(c1, c2) for c2 in x):
return False
return True
def is_same_constraint(c1: Constraint, c2: Constraint) -> bool:
# Ignore direction when comparing constraints against Any.
skip_op_check = isinstance(get_proper_type(c1.target), AnyType) and isinstance(
get_proper_type(c2.target), AnyType
)
return (
c1.type_var == c2.type_var
and (c1.op == c2.op or skip_op_check)
and mypy.subtypes.is_same_type(c1.target, c2.target)
)
def is_similar_constraints(x: list[Constraint], y: list[Constraint]) -> bool:
"""Check that two lists of constraints have similar structure.
    This means that each list has the same type variable plus direction pairs (i.e. we
    ignore the target). For constraints where the target is an Any type, the direction
    is ignored as well.
"""
return _is_similar_constraints(x, y) and _is_similar_constraints(y, x)
def _is_similar_constraints(x: list[Constraint], y: list[Constraint]) -> bool:
"""Check that every constraint in the first list has a similar one in the second.
See docstring above for definition of similarity.
"""
for c1 in x:
has_similar = False
for c2 in y:
# Ignore direction when either constraint is against Any.
skip_op_check = isinstance(get_proper_type(c1.target), AnyType) or isinstance(
get_proper_type(c2.target), AnyType
)
if c1.type_var == c2.type_var and (c1.op == c2.op or skip_op_check):
has_similar = True
break
if not has_similar:
return False
return True
def simplify_away_incomplete_types(types: Iterable[Type]) -> list[Type]:
complete = [typ for typ in types if is_complete_type(typ)]
if complete:
return complete
else:
return list(types)
def is_complete_type(typ: Type) -> bool:
"""Is a type complete?
A complete doesn't have uninhabited type components or (when not in strict
optional mode) None components.
"""
return typ.accept(CompleteTypeVisitor())
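# For example, is_complete_type() is False for an uninhabited type, and also for compound
# types such as list[<uninhabited>] that contain one, since the query below combines
# component results with all().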
class CompleteTypeVisitor(TypeQuery[bool]):
def __init__(self) -> None:
super().__init__(all)
def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
return False
class ConstraintBuilderVisitor(TypeVisitor[List[Constraint]]):
"""Visitor class for inferring type constraints."""
# The type that is compared against a template
# TODO: The value may be None. Is that actually correct?
actual: ProperType
def __init__(self, actual: ProperType, direction: int, skip_neg_op: bool) -> None:
# Direction must be SUBTYPE_OF or SUPERTYPE_OF.
self.actual = actual
self.direction = direction
# Whether to skip polymorphic inference (involves inference in opposite direction)
# this is used to prevent infinite recursion when both template and actual are
# generic callables.
self.skip_neg_op = skip_neg_op
# Trivial leaf types
def visit_unbound_type(self, template: UnboundType) -> list[Constraint]:
return []
def visit_any(self, template: AnyType) -> list[Constraint]:
return []
def visit_none_type(self, template: NoneType) -> list[Constraint]:
return []
def visit_uninhabited_type(self, template: UninhabitedType) -> list[Constraint]:
return []
def visit_erased_type(self, template: ErasedType) -> list[Constraint]:
return []
def visit_deleted_type(self, template: DeletedType) -> list[Constraint]:
return []
def visit_literal_type(self, template: LiteralType) -> list[Constraint]:
return []
# Errors
def visit_partial_type(self, template: PartialType) -> list[Constraint]:
# We can't do anything useful with a partial type here.
assert False, "Internal error"
# Non-trivial leaf type
def visit_type_var(self, template: TypeVarType) -> list[Constraint]:
assert False, (
"Unexpected TypeVarType in ConstraintBuilderVisitor"
" (should have been handled in infer_constraints)"
)
def visit_param_spec(self, template: ParamSpecType) -> list[Constraint]:
# Can't infer ParamSpecs from component values (only via Callable[P, T]).
return []
def visit_type_var_tuple(self, template: TypeVarTupleType) -> list[Constraint]:
raise NotImplementedError
def visit_unpack_type(self, template: UnpackType) -> list[Constraint]:
raise RuntimeError("Mypy bug: unpack should be handled at a higher level.")
def visit_parameters(self, template: Parameters) -> list[Constraint]:
# Constraining Any against C[P] turns into infer_against_any([P], Any)
if isinstance(self.actual, AnyType):
return self.infer_against_any(template.arg_types, self.actual)
if type_state.infer_polymorphic and isinstance(self.actual, Parameters):
# For polymorphic inference we need to be able to infer secondary constraints
# in situations like [x: T] <: P <: [x: int].
return infer_callable_arguments_constraints(template, self.actual, self.direction)
if type_state.infer_polymorphic and isinstance(self.actual, ParamSpecType):
# Similar for [x: T] <: Q <: Concatenate[int, P].
return infer_callable_arguments_constraints(
template, self.actual.prefix, self.direction
)
# There also may be unpatched types after a user error, simply ignore them.
return []
# Non-leaf types
def visit_instance(self, template: Instance) -> list[Constraint]:
original_actual = actual = self.actual
res: list[Constraint] = []
if isinstance(actual, (CallableType, Overloaded)) and template.type.is_protocol:
if "__call__" in template.type.protocol_members:
# Special case: a generic callback protocol
if not any(template == t for t in template.type.inferring):
template.type.inferring.append(template)
call = mypy.subtypes.find_member(
"__call__", template, actual, is_operator=True
)
assert call is not None
if mypy.subtypes.is_subtype(actual, erase_typevars(call)):
subres = infer_constraints(call, actual, self.direction)
res.extend(subres)
template.type.inferring.pop()
if isinstance(actual, CallableType) and actual.fallback is not None:
if actual.is_type_obj() and template.type.is_protocol:
ret_type = get_proper_type(actual.ret_type)
if isinstance(ret_type, TupleType):
ret_type = mypy.typeops.tuple_fallback(ret_type)
if isinstance(ret_type, Instance):
if self.direction == SUBTYPE_OF:
subtype = template
else:
subtype = ret_type
res.extend(
self.infer_constraints_from_protocol_members(
ret_type, template, subtype, template, class_obj=True
)
)
actual = actual.fallback
if isinstance(actual, TypeType) and template.type.is_protocol:
if isinstance(actual.item, Instance):
if self.direction == SUBTYPE_OF:
subtype = template
else:
subtype = actual.item
res.extend(
self.infer_constraints_from_protocol_members(
actual.item, template, subtype, template, class_obj=True
)
)
if self.direction == SUPERTYPE_OF:
# Infer constraints for Type[T] via metaclass of T when it makes sense.
a_item = actual.item
if isinstance(a_item, TypeVarType):
a_item = get_proper_type(a_item.upper_bound)
if isinstance(a_item, Instance) and a_item.type.metaclass_type:
res.extend(
self.infer_constraints_from_protocol_members(
a_item.type.metaclass_type, template, actual, template
)
)
if isinstance(actual, Overloaded) and actual.fallback is not None:
actual = actual.fallback
if isinstance(actual, TypedDictType):
actual = actual.as_anonymous().fallback
if isinstance(actual, LiteralType):
actual = actual.fallback
if isinstance(actual, Instance):
instance = actual
erased = erase_typevars(template)
assert isinstance(erased, Instance) # type: ignore[misc]
# We always try nominal inference if possible,
# it is much faster than the structural one.
if self.direction == SUBTYPE_OF and template.type.has_base(instance.type.fullname):
mapped = map_instance_to_supertype(template, instance.type)
tvars = mapped.type.defn.type_vars
if instance.type.has_type_var_tuple_type:
# Variadic types need special handling to map each type argument to
# the correct corresponding type variable.
assert instance.type.type_var_tuple_prefix is not None
assert instance.type.type_var_tuple_suffix is not None
prefix_len = instance.type.type_var_tuple_prefix
suffix_len = instance.type.type_var_tuple_suffix
tvt = instance.type.defn.type_vars[prefix_len]
assert isinstance(tvt, TypeVarTupleType)
fallback = tvt.tuple_fallback
i_prefix, i_middle, i_suffix = split_with_prefix_and_suffix(
instance.args, prefix_len, suffix_len
)
m_prefix, m_middle, m_suffix = split_with_prefix_and_suffix(
mapped.args, prefix_len, suffix_len
)
instance_args = i_prefix + (TupleType(list(i_middle), fallback),) + i_suffix
mapped_args = m_prefix + (TupleType(list(m_middle), fallback),) + m_suffix
else:
mapped_args = mapped.args
instance_args = instance.args
# N.B: We use zip instead of indexing because the lengths might have
# mismatches during daemon reprocessing.
for tvar, mapped_arg, instance_arg in zip(tvars, mapped_args, instance_args):
if isinstance(tvar, TypeVarType):
# The constraints for generic type parameters depend on variance.
# Include constraints from both directions if invariant.
if tvar.variance != CONTRAVARIANT:
res.extend(infer_constraints(mapped_arg, instance_arg, self.direction))
if tvar.variance != COVARIANT:
res.extend(
infer_constraints(mapped_arg, instance_arg, neg_op(self.direction))
)
elif isinstance(tvar, ParamSpecType) and isinstance(mapped_arg, ParamSpecType):
prefix = mapped_arg.prefix
if isinstance(instance_arg, Parameters):
# No such thing as variance for ParamSpecs, consider them invariant
# TODO: constraints between prefixes using
# infer_callable_arguments_constraints()
suffix: Type = instance_arg.copy_modified(
instance_arg.arg_types[len(prefix.arg_types) :],
instance_arg.arg_kinds[len(prefix.arg_kinds) :],
instance_arg.arg_names[len(prefix.arg_names) :],
)
res.append(Constraint(mapped_arg, SUBTYPE_OF, suffix))
res.append(Constraint(mapped_arg, SUPERTYPE_OF, suffix))
elif isinstance(instance_arg, ParamSpecType):
suffix = instance_arg.copy_modified(
prefix=Parameters(
instance_arg.prefix.arg_types[len(prefix.arg_types) :],
instance_arg.prefix.arg_kinds[len(prefix.arg_kinds) :],
instance_arg.prefix.arg_names[len(prefix.arg_names) :],
)
)
res.append(Constraint(mapped_arg, SUBTYPE_OF, suffix))
res.append(Constraint(mapped_arg, SUPERTYPE_OF, suffix))
elif isinstance(tvar, TypeVarTupleType):
# Handle variadic type variables covariantly for consistency.
res.extend(infer_constraints(mapped_arg, instance_arg, self.direction))
return res
elif self.direction == SUPERTYPE_OF and instance.type.has_base(template.type.fullname):
mapped = map_instance_to_supertype(instance, template.type)
tvars = template.type.defn.type_vars
if template.type.has_type_var_tuple_type:
# Variadic types need special handling to map each type argument to
# the correct corresponding type variable.
assert template.type.type_var_tuple_prefix is not None
assert template.type.type_var_tuple_suffix is not None
prefix_len = template.type.type_var_tuple_prefix
suffix_len = template.type.type_var_tuple_suffix
tvt = template.type.defn.type_vars[prefix_len]
assert isinstance(tvt, TypeVarTupleType)
fallback = tvt.tuple_fallback
t_prefix, t_middle, t_suffix = split_with_prefix_and_suffix(
template.args, prefix_len, suffix_len
)
m_prefix, m_middle, m_suffix = split_with_prefix_and_suffix(
mapped.args, prefix_len, suffix_len
)
template_args = t_prefix + (TupleType(list(t_middle), fallback),) + t_suffix
mapped_args = m_prefix + (TupleType(list(m_middle), fallback),) + m_suffix
else:
mapped_args = mapped.args
template_args = template.args
# N.B: We use zip instead of indexing because the lengths might have
# mismatches during daemon reprocessing.
for tvar, mapped_arg, template_arg in zip(tvars, mapped_args, template_args):
if isinstance(tvar, TypeVarType):
# The constraints for generic type parameters depend on variance.
# Include constraints from both directions if invariant.
if tvar.variance != CONTRAVARIANT:
res.extend(infer_constraints(template_arg, mapped_arg, self.direction))
if tvar.variance != COVARIANT:
res.extend(
infer_constraints(template_arg, mapped_arg, neg_op(self.direction))
)
elif isinstance(tvar, ParamSpecType) and isinstance(
template_arg, ParamSpecType
):
prefix = template_arg.prefix
if isinstance(mapped_arg, Parameters):
# No such thing as variance for ParamSpecs, consider them invariant
# TODO: constraints between prefixes using
# infer_callable_arguments_constraints()
suffix = mapped_arg.copy_modified(
mapped_arg.arg_types[len(prefix.arg_types) :],
mapped_arg.arg_kinds[len(prefix.arg_kinds) :],
mapped_arg.arg_names[len(prefix.arg_names) :],
)
res.append(Constraint(template_arg, SUBTYPE_OF, suffix))
res.append(Constraint(template_arg, SUPERTYPE_OF, suffix))
elif isinstance(mapped_arg, ParamSpecType):
suffix = mapped_arg.copy_modified(
prefix=Parameters(
mapped_arg.prefix.arg_types[len(prefix.arg_types) :],
mapped_arg.prefix.arg_kinds[len(prefix.arg_kinds) :],
mapped_arg.prefix.arg_names[len(prefix.arg_names) :],
)
)
res.append(Constraint(template_arg, SUBTYPE_OF, suffix))
res.append(Constraint(template_arg, SUPERTYPE_OF, suffix))
elif isinstance(tvar, TypeVarTupleType):
# Consider variadic type variables to be invariant.
res.extend(infer_constraints(template_arg, mapped_arg, SUBTYPE_OF))
res.extend(infer_constraints(template_arg, mapped_arg, SUPERTYPE_OF))
return res
if (
template.type.is_protocol
and self.direction == SUPERTYPE_OF
and
# We avoid infinite recursion for structural subtypes by checking
# whether this type already appeared in the inference chain.
# This is a conservative way to break the inference cycles.
# It never produces any "false" constraints but gives up soon
# on purely structural inference cycles, see #3829.
# Note that we use is_protocol_implementation instead of is_subtype
# because some type may be considered a subtype of a protocol
# due to _promote, but still not implement the protocol.
not any(template == t for t in reversed(template.type.inferring))
and mypy.subtypes.is_protocol_implementation(instance, erased, skip=["__call__"])
):
template.type.inferring.append(template)
res.extend(
self.infer_constraints_from_protocol_members(
instance, template, original_actual, template
)
)
template.type.inferring.pop()
return res
elif (
instance.type.is_protocol
and self.direction == SUBTYPE_OF
and
# We avoid infinite recursion for structural subtypes also here.
not any(instance == i for i in reversed(instance.type.inferring))
and mypy.subtypes.is_protocol_implementation(erased, instance, skip=["__call__"])
):
instance.type.inferring.append(instance)
res.extend(
self.infer_constraints_from_protocol_members(
instance, template, template, instance
)
)
instance.type.inferring.pop()
return res
if res:
return res
if isinstance(actual, AnyType):
return self.infer_against_any(template.args, actual)
if (
isinstance(actual, TupleType)
and is_named_instance(template, TUPLE_LIKE_INSTANCE_NAMES)
and self.direction == SUPERTYPE_OF
):
for item in actual.items:
if isinstance(item, UnpackType):
unpacked = get_proper_type(item.type)
if isinstance(unpacked, TypeVarTupleType):
# Cannot infer anything for T from [T, ...] <: *Ts
continue
assert (
isinstance(unpacked, Instance)
and unpacked.type.fullname == "builtins.tuple"
)
item = unpacked.args[0]
cb = infer_constraints(template.args[0], item, SUPERTYPE_OF)
res.extend(cb)
return res
elif isinstance(actual, TupleType) and self.direction == SUPERTYPE_OF:
return infer_constraints(template, mypy.typeops.tuple_fallback(actual), self.direction)
elif isinstance(actual, TypeVarType):
if not actual.values and not actual.id.is_meta_var():
return infer_constraints(template, actual.upper_bound, self.direction)
return []
elif isinstance(actual, ParamSpecType):
return infer_constraints(template, actual.upper_bound, self.direction)
elif isinstance(actual, TypeVarTupleType):
raise NotImplementedError
else:
return []
def infer_constraints_from_protocol_members(
self,
instance: Instance,
template: Instance,
subtype: Type,
protocol: Instance,
class_obj: bool = False,
) -> list[Constraint]:
"""Infer constraints for situations where either 'template' or 'instance' is a protocol.
The 'protocol' is the one of two that is an instance of protocol type, 'subtype'
    is the type used to bind self during inference. Currently, we just infer constraints for
every protocol member type (both ways for settable members).
"""
res = []
for member in protocol.type.protocol_members:
inst = mypy.subtypes.find_member(member, instance, subtype, class_obj=class_obj)
temp = mypy.subtypes.find_member(member, template, subtype)
if inst is None or temp is None:
if member == "__call__":
continue
return [] # See #11020
# The above is safe since at this point we know that 'instance' is a subtype
# of (erased) 'template', therefore it defines all protocol members
res.extend(infer_constraints(temp, inst, self.direction))
if mypy.subtypes.IS_SETTABLE in mypy.subtypes.get_member_flags(member, protocol):
# Settable members are invariant, add opposite constraints
res.extend(infer_constraints(temp, inst, neg_op(self.direction)))
return res
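    # Rough example for infer_constraints_from_protocol_members() above: matching a generic
    # protocol such as SupportsAbs[T] (whose __abs__ returns T) against an instance whose
    # __abs__ returns int compares the member types found via find_member() and yields
    # roughly T :> int; settable members additionally produce the reverse constraint.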
def visit_callable_type(self, template: CallableType) -> list[Constraint]:
# Normalize callables before matching against each other.
# Note that non-normalized callables can be created in annotations
# using e.g. callback protocols.
# TODO: check that callables match? Ideally we should not infer constraints
        # for callables that can never be subtypes of one another in the given direction.
template = template.with_unpacked_kwargs()
extra_tvars = False
if isinstance(self.actual, CallableType):
res: list[Constraint] = []
cactual = self.actual.with_unpacked_kwargs()
param_spec = template.param_spec()
template_ret_type, cactual_ret_type = template.ret_type, cactual.ret_type
if template.type_guard is not None and cactual.type_guard is not None:
template_ret_type = template.type_guard
cactual_ret_type = cactual.type_guard
elif template.type_guard is not None:
template_ret_type = AnyType(TypeOfAny.special_form)
elif cactual.type_guard is not None:
cactual_ret_type = AnyType(TypeOfAny.special_form)
if template.type_is is not None and cactual.type_is is not None:
template_ret_type = template.type_is
cactual_ret_type = cactual.type_is
elif template.type_is is not None:
template_ret_type = AnyType(TypeOfAny.special_form)
elif cactual.type_is is not None:
cactual_ret_type = AnyType(TypeOfAny.special_form)
res.extend(infer_constraints(template_ret_type, cactual_ret_type, self.direction))
if param_spec is None:
# TODO: Erase template variables if it is generic?
if (
type_state.infer_polymorphic
and cactual.variables
and not self.skip_neg_op
# Technically, the correct inferred type for application of e.g.
# Callable[..., T] -> Callable[..., T] (with literal ellipsis), to a generic
# like U -> U, should be Callable[..., Any], but if U is a self-type, we can
# allow it to leak, to be later bound to self. A bunch of existing code
# depends on this old behaviour.
and not any(tv.id.is_self() for tv in cactual.variables)
):
# If the actual callable is generic, infer constraints in the opposite
# direction, and indicate to the solver there are extra type variables
# to solve for (see more details in mypy/solve.py).
res.extend(
infer_constraints(
cactual, template, neg_op(self.direction), skip_neg_op=True
)
)
extra_tvars = True
# We can't infer constraints from arguments if the template is Callable[..., T]
# (with literal '...').
if not template.is_ellipsis_args:
unpack_present = find_unpack_in_list(template.arg_types)
# When both ParamSpec and TypeVarTuple are present, things become messy
# quickly. For now, we only allow ParamSpec to "capture" TypeVarTuple,
# but not vice versa.
# TODO: infer more from prefixes when possible.
if unpack_present is not None and not cactual.param_spec():
# We need to re-normalize args to the form they appear in tuples,
# for callables we always pack the suffix inside another tuple.
unpack = template.arg_types[unpack_present]
assert isinstance(unpack, UnpackType)
tuple_type = get_tuple_fallback_from_unpack(unpack)
template_types = repack_callable_args(template, tuple_type)
actual_types = repack_callable_args(cactual, tuple_type)
# Now we can use the same general helper as for tuple types.
unpack_constraints = build_constraints_for_simple_unpack(
template_types, actual_types, neg_op(self.direction)
)
res.extend(unpack_constraints)
else:
# TODO: do we need some special-casing when unpack is present in actual
# callable but not in template callable?
res.extend(
infer_callable_arguments_constraints(template, cactual, self.direction)
)
else:
prefix = param_spec.prefix
prefix_len = len(prefix.arg_types)
cactual_ps = cactual.param_spec()
if type_state.infer_polymorphic and cactual.variables and not self.skip_neg_op:
# Similar logic to the branch above.
res.extend(
infer_constraints(
cactual, template, neg_op(self.direction), skip_neg_op=True
)
)
extra_tvars = True
# Compare prefixes as well
cactual_prefix = cactual.copy_modified(
arg_types=cactual.arg_types[:prefix_len],
arg_kinds=cactual.arg_kinds[:prefix_len],
arg_names=cactual.arg_names[:prefix_len],
)
res.extend(
infer_callable_arguments_constraints(prefix, cactual_prefix, self.direction)
)
param_spec_target: Type | None = None
if not cactual_ps:
max_prefix_len = len([k for k in cactual.arg_kinds if k in (ARG_POS, ARG_OPT)])
prefix_len = min(prefix_len, max_prefix_len)
param_spec_target = Parameters(
arg_types=cactual.arg_types[prefix_len:],
arg_kinds=cactual.arg_kinds[prefix_len:],
arg_names=cactual.arg_names[prefix_len:],
variables=cactual.variables if not type_state.infer_polymorphic else [],
imprecise_arg_kinds=cactual.imprecise_arg_kinds,
)
else:
if len(param_spec.prefix.arg_types) <= len(cactual_ps.prefix.arg_types):
param_spec_target = cactual_ps.copy_modified(
prefix=Parameters(
arg_types=cactual_ps.prefix.arg_types[prefix_len:],
arg_kinds=cactual_ps.prefix.arg_kinds[prefix_len:],
arg_names=cactual_ps.prefix.arg_names[prefix_len:],
imprecise_arg_kinds=cactual_ps.prefix.imprecise_arg_kinds,
)
)
if param_spec_target is not None:
res.append(Constraint(param_spec, self.direction, param_spec_target))
if extra_tvars:
for c in res:
c.extra_tvars += cactual.variables
return res
elif isinstance(self.actual, AnyType):
param_spec = template.param_spec()
any_type = AnyType(TypeOfAny.from_another_any, source_any=self.actual)
if param_spec is None:
# FIX what if generic
res = self.infer_against_any(template.arg_types, self.actual)
else:
res = [
Constraint(
param_spec,
SUBTYPE_OF,
Parameters([any_type, any_type], [ARG_STAR, ARG_STAR2], [None, None]),
)
]
res.extend(infer_constraints(template.ret_type, any_type, self.direction))
return res
elif isinstance(self.actual, Overloaded):
return self.infer_against_overloaded(self.actual, template)
elif isinstance(self.actual, TypeType):
return infer_constraints(template.ret_type, self.actual.item, self.direction)
elif isinstance(self.actual, Instance):
# Instances with __call__ method defined are considered structural
# subtypes of Callable with a compatible signature.
call = mypy.subtypes.find_member(
"__call__", self.actual, self.actual, is_operator=True
)
if call:
return infer_constraints(template, call, self.direction)
else:
return []
else:
return []
def infer_against_overloaded(
self, overloaded: Overloaded, template: CallableType
) -> list[Constraint]:
# Create constraints by matching an overloaded type against a template.
# This is tricky to do in general. We cheat by only matching against
# the first overload item that is callable compatible. This
# seems to work somewhat well, but we should really use a more
# reliable technique.
item = find_matching_overload_item(overloaded, template)
return infer_constraints(template, item, self.direction)
def visit_tuple_type(self, template: TupleType) -> list[Constraint]:
actual = self.actual
unpack_index = find_unpack_in_list(template.items)
is_varlength_tuple = (
isinstance(actual, Instance) and actual.type.fullname == "builtins.tuple"
)
if isinstance(actual, TupleType) or is_varlength_tuple:
res: list[Constraint] = []
if unpack_index is not None:
if is_varlength_tuple:
                    # A variadic tuple can only be a supertype of a tuple type, but even if
# direction is opposite, inferring something may give better error messages.
unpack_type = template.items[unpack_index]
assert isinstance(unpack_type, UnpackType)
unpacked_type = get_proper_type(unpack_type.type)
if isinstance(unpacked_type, TypeVarTupleType):
res = [
Constraint(type_var=unpacked_type, op=self.direction, target=actual)
]
else:
assert (
isinstance(unpacked_type, Instance)
and unpacked_type.type.fullname == "builtins.tuple"
)
res = infer_constraints(unpacked_type, actual, self.direction)
assert isinstance(actual, Instance) # ensured by is_varlength_tuple == True
for i, ti in enumerate(template.items):
if i == unpack_index:
# This one we just handled above.
continue
# For Tuple[T, *Ts, S] <: tuple[X, ...] infer also T <: X and S <: X.
res.extend(infer_constraints(ti, actual.args[0], self.direction))
return res
else:
assert isinstance(actual, TupleType)
unpack_constraints = build_constraints_for_simple_unpack(
template.items, actual.items, self.direction
)
actual_items: tuple[Type, ...] = ()
template_items: tuple[Type, ...] = ()
res.extend(unpack_constraints)
elif isinstance(actual, TupleType):
a_unpack_index = find_unpack_in_list(actual.items)
if a_unpack_index is not None:
# The case where template tuple doesn't have an unpack, but actual tuple
# has an unpack. We can infer something if actual unpack is a variadic tuple.
# Tuple[T, S, U] <: tuple[X, *tuple[Y, ...], Z] => T <: X, S <: Y, U <: Z.
a_unpack = actual.items[a_unpack_index]
assert isinstance(a_unpack, UnpackType)
a_unpacked = get_proper_type(a_unpack.type)
if len(actual.items) + 1 <= len(template.items):
a_prefix_len = a_unpack_index
a_suffix_len = len(actual.items) - a_unpack_index - 1
t_prefix, t_middle, t_suffix = split_with_prefix_and_suffix(
tuple(template.items), a_prefix_len, a_suffix_len
)
actual_items = tuple(actual.items[:a_prefix_len])
if a_suffix_len:
actual_items += tuple(actual.items[-a_suffix_len:])
template_items = t_prefix + t_suffix
if isinstance(a_unpacked, Instance):
assert a_unpacked.type.fullname == "builtins.tuple"
for tm in t_middle:
res.extend(
infer_constraints(tm, a_unpacked.args[0], self.direction)
)
else:
actual_items = ()
template_items = ()
else:
actual_items = tuple(actual.items)
template_items = tuple(template.items)
else:
return res
# Cases above will return if actual wasn't a TupleType.
assert isinstance(actual, TupleType)
if len(actual_items) == len(template_items):
if (
actual.partial_fallback.type.is_named_tuple
and template.partial_fallback.type.is_named_tuple
):
# For named tuples using just the fallbacks usually gives better results.
return res + infer_constraints(
template.partial_fallback, actual.partial_fallback, self.direction
)
for i in range(len(template_items)):
res.extend(
infer_constraints(template_items[i], actual_items[i], self.direction)
)
return res
elif isinstance(actual, AnyType):
return self.infer_against_any(template.items, actual)
else:
return []
def visit_typeddict_type(self, template: TypedDictType) -> list[Constraint]:
actual = self.actual
if isinstance(actual, TypedDictType):
res: list[Constraint] = []
# NOTE: Non-matching keys are ignored. Compatibility is checked
# elsewhere so this shouldn't be unsafe.
for item_name, template_item_type, actual_item_type in template.zip(actual):
res.extend(infer_constraints(template_item_type, actual_item_type, self.direction))
return res
elif isinstance(actual, AnyType):
return self.infer_against_any(template.items.values(), actual)
else:
return []
def visit_union_type(self, template: UnionType) -> list[Constraint]:
assert False, (
"Unexpected UnionType in ConstraintBuilderVisitor"
" (should have been handled in infer_constraints)"
)
def visit_type_alias_type(self, template: TypeAliasType) -> list[Constraint]:
assert False, f"This should be never called, got {template}"
def infer_against_any(self, types: Iterable[Type], any_type: AnyType) -> list[Constraint]:
res: list[Constraint] = []
for t in types:
if isinstance(t, UnpackType):
if isinstance(t.type, TypeVarTupleType):
res.append(Constraint(t.type, self.direction, any_type))
else:
unpacked = get_proper_type(t.type)
assert isinstance(unpacked, Instance)
res.extend(infer_constraints(unpacked, any_type, self.direction))
else:
# Note that we ignore variance and simply always use the
# original direction. This is because for Any targets direction is
# irrelevant in most cases, see e.g. is_same_constraint().
res.extend(infer_constraints(t, any_type, self.direction))
return res
def visit_overloaded(self, template: Overloaded) -> list[Constraint]:
if isinstance(self.actual, CallableType):
items = find_matching_overload_items(template, self.actual)
else:
items = template.items
res: list[Constraint] = []
for t in items:
res.extend(infer_constraints(t, self.actual, self.direction))
return res
def visit_type_type(self, template: TypeType) -> list[Constraint]:
if isinstance(self.actual, CallableType):
return infer_constraints(template.item, self.actual.ret_type, self.direction)
elif isinstance(self.actual, Overloaded):
return infer_constraints(template.item, self.actual.items[0].ret_type, self.direction)
elif isinstance(self.actual, TypeType):
return infer_constraints(template.item, self.actual.item, self.direction)
elif isinstance(self.actual, AnyType):
return infer_constraints(template.item, self.actual, self.direction)
else:
return []
def neg_op(op: int) -> int:
"""Map SubtypeOf to SupertypeOf and vice versa."""
if op == SUBTYPE_OF:
return SUPERTYPE_OF
elif op == SUPERTYPE_OF:
return SUBTYPE_OF
else:
raise ValueError(f"Invalid operator {op}")
def find_matching_overload_item(overloaded: Overloaded, template: CallableType) -> CallableType:
"""Disambiguate overload item against a template."""
items = overloaded.items
for item in items:
# Return type may be indeterminate in the template, so ignore it when performing a
# subtype check.
if mypy.subtypes.is_callable_compatible(
item,
template,
is_compat=mypy.subtypes.is_subtype,
is_proper_subtype=False,
ignore_return=True,
):
return item
# Fall back to the first item if we can't find a match. This is totally arbitrary --
# maybe we should just bail out at this point.
return items[0]
def find_matching_overload_items(
overloaded: Overloaded, template: CallableType
) -> list[CallableType]:
"""Like find_matching_overload_item, but return all matches, not just the first."""
items = overloaded.items
res = []
for item in items:
# Return type may be indeterminate in the template, so ignore it when performing a
# subtype check.
if mypy.subtypes.is_callable_compatible(
item,
template,
is_compat=mypy.subtypes.is_subtype,
is_proper_subtype=False,
ignore_return=True,
):
res.append(item)
if not res:
# Falling back to all items if we can't find a match is pretty arbitrary, but
# it maintains backward compatibility.
res = items.copy()
return res
def get_tuple_fallback_from_unpack(unpack: UnpackType) -> TypeInfo:
"""Get builtins.tuple type from available types to construct homogeneous tuples."""
tp = get_proper_type(unpack.type)
if isinstance(tp, Instance) and tp.type.fullname == "builtins.tuple":
return tp.type
if isinstance(tp, TypeVarTupleType):
return tp.tuple_fallback.type
if isinstance(tp, TupleType):
for base in tp.partial_fallback.type.mro:
if base.fullname == "builtins.tuple":
return base
assert False, "Invalid unpack type"
def repack_callable_args(callable: CallableType, tuple_type: TypeInfo) -> list[Type]:
"""Present callable with star unpack in a normalized form.
Since positional arguments cannot follow star argument, they are packed in a suffix,
while prefix is represented as individual positional args. We want to put all in a single
list with unpack in the middle, and prefix/suffix on the sides (as they would appear
in e.g. a TupleType).
"""
if ARG_STAR not in callable.arg_kinds:
return callable.arg_types
star_index = callable.arg_kinds.index(ARG_STAR)
arg_types = callable.arg_types[:star_index]
star_type = callable.arg_types[star_index]
suffix_types = []
if not isinstance(star_type, UnpackType):
# Re-normalize *args: X -> *args: *tuple[X, ...]
star_type = UnpackType(Instance(tuple_type, [star_type]))
else:
tp = get_proper_type(star_type.type)
if isinstance(tp, TupleType):
assert isinstance(tp.items[0], UnpackType)
star_type = tp.items[0]
suffix_types = tp.items[1:]
return arg_types + [star_type] + suffix_types
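# Rough examples for repack_callable_args() above: a callable (x: X, *args: *tuple[*Ts, Y]) is
# repacked as [X, *Ts, Y], while (x: X, *args: Y) becomes [X, *tuple[Y, ...]], matching the
# shape these arguments would have inside a TupleType.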
def build_constraints_for_simple_unpack(
template_args: list[Type], actual_args: list[Type], direction: int
) -> list[Constraint]:
"""Infer constraints between two lists of types with variadic items.
This function is only supposed to be called when a variadic item is present in templates.
    If there is no variadic item in the actuals, we simply use split_with_prefix_and_suffix()
and infer prefix <: prefix, suffix <: suffix, variadic <: middle. If there is a variadic
item in the actuals we need to be more careful, only common prefix/suffix can generate
constraints, also we can only infer constraints for variadic template item, if template
    prefix/suffix are shorter than actual ones, otherwise there may be partial overlap
between variadic items, for example if template prefix is longer:
templates: T1, T2, Ts, Ts, Ts, ...
actuals: A1, As, As, As, ...
Note: this function can only be called for builtin variadic constructors: Tuple and Callable.
For instances, you should first find correct type argument mapping.
"""
template_unpack = find_unpack_in_list(template_args)
assert template_unpack is not None
template_prefix = template_unpack
template_suffix = len(template_args) - template_prefix - 1
t_unpack = None
res = []
actual_unpack = find_unpack_in_list(actual_args)
if actual_unpack is None:
t_unpack = template_args[template_unpack]
if template_prefix + template_suffix > len(actual_args):
            # These can't be subtypes of each other, return fast.
assert isinstance(t_unpack, UnpackType)
if isinstance(t_unpack.type, TypeVarTupleType):
# Set TypeVarTuple to empty to improve error messages.
return [
Constraint(
t_unpack.type, direction, TupleType([], t_unpack.type.tuple_fallback)
)
]
else:
return []
common_prefix = template_prefix
common_suffix = template_suffix
else:
actual_prefix = actual_unpack
actual_suffix = len(actual_args) - actual_prefix - 1
common_prefix = min(template_prefix, actual_prefix)
common_suffix = min(template_suffix, actual_suffix)
if actual_prefix >= template_prefix and actual_suffix >= template_suffix:
# This is the only case where we can guarantee there will be no partial overlap
# (note however partial overlap is OK for variadic tuples, it is handled below).
t_unpack = template_args[template_unpack]
# Handle constraints from prefixes/suffixes first.
start, middle, end = split_with_prefix_and_suffix(
tuple(actual_args), common_prefix, common_suffix
)
for t, a in zip(template_args[:common_prefix], start):
res.extend(infer_constraints(t, a, direction))
if common_suffix:
for t, a in zip(template_args[-common_suffix:], end):
res.extend(infer_constraints(t, a, direction))
if t_unpack is not None:
# Add constraint(s) for variadic item when possible.
assert isinstance(t_unpack, UnpackType)
tp = get_proper_type(t_unpack.type)
if isinstance(tp, Instance) and tp.type.fullname == "builtins.tuple":
# Homogeneous case *tuple[T, ...] <: [X, Y, Z, ...].
for a in middle:
# TODO: should we use union instead of join here?
if not isinstance(a, UnpackType):
res.extend(infer_constraints(tp.args[0], a, direction))
else:
a_tp = get_proper_type(a.type)
# This is the case *tuple[T, ...] <: *tuple[A, ...].
if isinstance(a_tp, Instance) and a_tp.type.fullname == "builtins.tuple":
res.extend(infer_constraints(tp.args[0], a_tp.args[0], direction))
elif isinstance(tp, TypeVarTupleType):
res.append(Constraint(tp, direction, TupleType(list(middle), tp.tuple_fallback)))
elif actual_unpack is not None:
# A special case for a variadic tuple unpack, we simply infer T <: X from
# Tuple[..., *tuple[T, ...], ...] <: Tuple[..., *tuple[X, ...], ...].
actual_unpack_type = actual_args[actual_unpack]
assert isinstance(actual_unpack_type, UnpackType)
a_unpacked = get_proper_type(actual_unpack_type.type)
if isinstance(a_unpacked, Instance) and a_unpacked.type.fullname == "builtins.tuple":
t_unpack = template_args[template_unpack]
assert isinstance(t_unpack, UnpackType)
tp = get_proper_type(t_unpack.type)
if isinstance(tp, Instance) and tp.type.fullname == "builtins.tuple":
res.extend(infer_constraints(tp.args[0], a_unpacked.args[0], direction))
return res
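# Rough example: with template items [T, *Ts] and actual items [int, str, str] under
# SUPERTYPE_OF, build_constraints_for_simple_unpack() above yields approximately T :> int and
# Ts :> Tuple[str, str]: the common prefix is matched positionally and the remaining middle is
# packed into a TupleType for the TypeVarTuple.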
def infer_directed_arg_constraints(left: Type, right: Type, direction: int) -> list[Constraint]:
"""Infer constraints between two arguments using direction between original callables."""
if isinstance(left, (ParamSpecType, UnpackType)) or isinstance(
right, (ParamSpecType, UnpackType)
):
# This avoids bogus constraints like T <: P.args
# TODO: can we infer something useful for *T vs P?
return []
if direction == SUBTYPE_OF:
# We invert direction to account for argument contravariance.
return infer_constraints(left, right, neg_op(direction))
else:
return infer_constraints(right, left, neg_op(direction))
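# Argument types are contravariant, hence the inverted direction above. Rough example:
# matching template Callable[[T], None] against actual Callable[[int], None] with direction
# SUBTYPE_OF produces T :> int for the argument, i.e. int must be assignable to whatever T
# becomes.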
def infer_callable_arguments_constraints(
template: NormalizedCallableType | Parameters,
actual: NormalizedCallableType | Parameters,
direction: int,
) -> list[Constraint]:
"""Infer constraints between argument types of two callables.
This function essentially extracts four steps from are_parameters_compatible() in
subtypes.py that involve subtype checks between argument types. We keep the argument
matching logic, but ignore various strictness flags present there, and checks that
do not involve subtyping. Then in place of every subtype check we put an infer_constraints()
call for the same types.
"""
res = []
if direction == SUBTYPE_OF:
left, right = template, actual
else:
left, right = actual, template
left_star = left.var_arg()
left_star2 = left.kw_arg()
right_star = right.var_arg()
right_star2 = right.kw_arg()
# Numbering of steps below matches the one in are_parameters_compatible() for convenience.
# Phase 1a: compare star vs star arguments.
if left_star is not None and right_star is not None:
res.extend(infer_directed_arg_constraints(left_star.typ, right_star.typ, direction))
if left_star2 is not None and right_star2 is not None:
res.extend(infer_directed_arg_constraints(left_star2.typ, right_star2.typ, direction))
# Phase 1b: compare left args with corresponding non-star right arguments.
for right_arg in right.formal_arguments():
left_arg = mypy.typeops.callable_corresponding_argument(left, right_arg)
if left_arg is None:
continue
res.extend(infer_directed_arg_constraints(left_arg.typ, right_arg.typ, direction))
# Phase 1c: compare left args with right *args.
if right_star is not None:
right_by_position = right.try_synthesizing_arg_from_vararg(None)
assert right_by_position is not None
i = right_star.pos
assert i is not None
while i < len(left.arg_kinds) and left.arg_kinds[i].is_positional():
left_by_position = left.argument_by_position(i)
assert left_by_position is not None
res.extend(
infer_directed_arg_constraints(
left_by_position.typ, right_by_position.typ, direction
)
)
i += 1
# Phase 1d: compare left args with right **kwargs.
if right_star2 is not None:
right_names = {name for name in right.arg_names if name is not None}
left_only_names = set()
for name, kind in zip(left.arg_names, left.arg_kinds):
if name is None or kind.is_star() or name in right_names:
continue
left_only_names.add(name)
right_by_name = right.try_synthesizing_arg_from_kwarg(None)
assert right_by_name is not None
for name in left_only_names:
left_by_name = left.argument_by_name(name)
assert left_by_name is not None
res.extend(
infer_directed_arg_constraints(left_by_name.typ, right_by_name.typ, direction)
)
return res
def filter_imprecise_kinds(cs: list[Constraint]) -> list[Constraint]:
"""For each ParamSpec remove all imprecise constraints, if at least one precise available."""
have_precise = set()
for c in cs:
if not isinstance(c.origin_type_var, ParamSpecType):
continue
if (
isinstance(c.target, ParamSpecType)
or isinstance(c.target, Parameters)
and not c.target.imprecise_arg_kinds
):
have_precise.add(c.type_var)
new_cs = []
for c in cs:
if not isinstance(c.origin_type_var, ParamSpecType) or c.type_var not in have_precise:
            new_cs.append(c)
            continue
if not isinstance(c.target, Parameters) or not c.target.imprecise_arg_kinds:
new_cs.append(c)
return new_cs
| algorandfoundation/puya | src/puyapy/_vendor/mypy/constraints.py | Python | NOASSERTION | 76,919 |
from __future__ import annotations
from typing import Any, cast
from mypy.types import (
AnyType,
CallableType,
DeletedType,
ErasedType,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
ProperType,
TupleType,
TypeAliasType,
TypedDictType,
TypeType,
TypeVarTupleType,
TypeVarType,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
)
# type_visitor needs to be imported after types
from mypy.type_visitor import TypeVisitor # ruff: isort: skip
def copy_type(t: ProperType) -> ProperType:
"""Create a shallow copy of a type.
This can be used to mutate the copy with truthiness information.
Classes compiled with mypyc don't support copy.copy(), so we need
a custom implementation.
"""
return t.accept(TypeShallowCopier())
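# Minimal usage sketch (informal): callers that narrow a type for truthiness typically do
#   dup = copy_type(t)
#   dup.can_be_false = False
# mutating only the copy so that the original, possibly shared, type object stays unchanged.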
class TypeShallowCopier(TypeVisitor[ProperType]):
def visit_unbound_type(self, t: UnboundType) -> ProperType:
return t
def visit_any(self, t: AnyType) -> ProperType:
return self.copy_common(t, AnyType(t.type_of_any, t.source_any, t.missing_import_name))
def visit_none_type(self, t: NoneType) -> ProperType:
return self.copy_common(t, NoneType())
def visit_uninhabited_type(self, t: UninhabitedType) -> ProperType:
dup = UninhabitedType()
dup.ambiguous = t.ambiguous
return self.copy_common(t, dup)
def visit_erased_type(self, t: ErasedType) -> ProperType:
return self.copy_common(t, ErasedType())
def visit_deleted_type(self, t: DeletedType) -> ProperType:
return self.copy_common(t, DeletedType(t.source))
def visit_instance(self, t: Instance) -> ProperType:
dup = Instance(t.type, t.args, last_known_value=t.last_known_value)
dup.invalid = t.invalid
return self.copy_common(t, dup)
def visit_type_var(self, t: TypeVarType) -> ProperType:
return self.copy_common(t, t.copy_modified())
def visit_param_spec(self, t: ParamSpecType) -> ProperType:
dup = ParamSpecType(
t.name, t.fullname, t.id, t.flavor, t.upper_bound, t.default, prefix=t.prefix
)
return self.copy_common(t, dup)
def visit_parameters(self, t: Parameters) -> ProperType:
dup = Parameters(
t.arg_types,
t.arg_kinds,
t.arg_names,
variables=t.variables,
is_ellipsis_args=t.is_ellipsis_args,
)
return self.copy_common(t, dup)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> ProperType:
dup = TypeVarTupleType(
t.name, t.fullname, t.id, t.upper_bound, t.tuple_fallback, t.default
)
return self.copy_common(t, dup)
def visit_unpack_type(self, t: UnpackType) -> ProperType:
dup = UnpackType(t.type)
return self.copy_common(t, dup)
def visit_partial_type(self, t: PartialType) -> ProperType:
return self.copy_common(t, PartialType(t.type, t.var, t.value_type))
def visit_callable_type(self, t: CallableType) -> ProperType:
return self.copy_common(t, t.copy_modified())
def visit_tuple_type(self, t: TupleType) -> ProperType:
return self.copy_common(t, TupleType(t.items, t.partial_fallback, implicit=t.implicit))
def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
return self.copy_common(
t, TypedDictType(t.items, t.required_keys, t.readonly_keys, t.fallback)
)
def visit_literal_type(self, t: LiteralType) -> ProperType:
return self.copy_common(t, LiteralType(value=t.value, fallback=t.fallback))
def visit_union_type(self, t: UnionType) -> ProperType:
return self.copy_common(t, UnionType(t.items))
def visit_overloaded(self, t: Overloaded) -> ProperType:
return self.copy_common(t, Overloaded(items=t.items))
def visit_type_type(self, t: TypeType) -> ProperType:
# Use cast since the type annotations in TypeType are imprecise.
return self.copy_common(t, TypeType(cast(Any, t.item)))
def visit_type_alias_type(self, t: TypeAliasType) -> ProperType:
assert False, "only ProperTypes supported"
def copy_common(self, t: ProperType, t2: ProperType) -> ProperType:
t2.line = t.line
t2.column = t.column
t2.can_be_false = t.can_be_false
t2.can_be_true = t.can_be_true
return t2
| algorandfoundation/puya | src/puyapy/_vendor/mypy/copytype.py | Python | NOASSERTION | 4,451 |
from __future__ import annotations
import os
from typing import Final
# Earliest fully supported Python 3.x version. Used as the default Python
# version in tests. Mypy wheels should be built starting with this version,
# and CI tests should be run on this version (and later versions).
PYTHON3_VERSION: Final = (3, 8)
# Earliest Python 3.x version supported via --python-version 3.x. To run
# mypy, at least version PYTHON3_VERSION is needed.
PYTHON3_VERSION_MIN: Final = (3, 8) # Keep in sync with typeshed's python support
CACHE_DIR: Final = ".mypy_cache"
CONFIG_FILE: Final = ["mypy.ini", ".mypy.ini"]
PYPROJECT_CONFIG_FILES: Final = ["pyproject.toml"]
SHARED_CONFIG_FILES: Final = ["setup.cfg"]
USER_CONFIG_FILES: Final = ["~/.config/mypy/config", "~/.mypy.ini"]
if os.environ.get("XDG_CONFIG_HOME"):
USER_CONFIG_FILES.insert(0, os.path.join(os.environ["XDG_CONFIG_HOME"], "mypy/config"))
CONFIG_FILES: Final = (
CONFIG_FILE + PYPROJECT_CONFIG_FILES + SHARED_CONFIG_FILES + USER_CONFIG_FILES
)
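# With the defaults above (and no command-line overrides), config files are searched in
# roughly this order: mypy.ini, .mypy.ini, pyproject.toml, setup.cfg,
# $XDG_CONFIG_HOME/mypy/config (when XDG_CONFIG_HOME is set), ~/.config/mypy/config,
# ~/.mypy.ini.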
# This must include all reporters defined in mypy.report. This is defined here
# to make reporter names available without importing mypy.report -- this speeds
# up startup.
REPORTER_NAMES: Final = [
"linecount",
"any-exprs",
"linecoverage",
"memory-xml",
"cobertura-xml",
"xml",
"xslt-html",
"xslt-txt",
"html",
"txt",
"lineprecision",
]
# Threshold after which we sometimes filter out most errors to avoid very
# verbose output. The default is to show all errors.
MANY_ERRORS_THRESHOLD: Final = -1
| algorandfoundation/puya | src/puyapy/_vendor/mypy/defaults.py | Python | NOASSERTION | 1,558 |