filename | patch | parent_content | id
---|---|---|---|
aiosmtpd/smtp.py
|
@@ -87,7 +87,7 @@ class _DataState(enum.Enum):
EMPTY_BARR = bytearray()
EMPTYBYTES = b''
MISSING = _Missing.MISSING
-NEWLINE = '\n'
+NEWLINE = '\r\n'
VALID_AUTHMECH = re.compile(r"[A-Z0-9_-]+\Z")
# https://tools.ietf.org/html/rfc3207.html#page-3
@@ -1427,9 +1427,10 @@ async def smtp_DATA(self, arg: str) -> None:
# Since eof_received cancels this coroutine,
# readuntil() can never raise asyncio.IncompleteReadError.
try:
- line: bytes = await self._reader.readuntil()
+ # https://datatracker.ietf.org/doc/html/rfc5321#section-2.3.8
+ line: bytes = await self._reader.readuntil(b'\r\n')
log.debug('DATA readline: %s', line)
- assert line.endswith(b'\n')
+ assert line.endswith(b'\r\n')
except asyncio.CancelledError:
# The connection got reset during the DATA command.
log.info('Connection lost during DATA')
@@ -1446,7 +1447,7 @@ async def smtp_DATA(self, arg: str) -> None:
data *= 0
# Drain the stream anyways
line = await self._reader.read(e.consumed)
- assert not line.endswith(b'\n')
+ assert not line.endswith(b'\r\n')
# A lone dot in a line signals the end of DATA.
if not line_fragments and line == b'.\r\n':
break
@@ -1458,7 +1459,7 @@ async def smtp_DATA(self, arg: str) -> None:
# Discard data immediately to prevent memory pressure
data *= 0
line_fragments.append(line)
- if line.endswith(b'\n'):
+ if line.endswith(b'\r\n'):
# Record data only if state is "NOMINAL"
if state == _DataState.NOMINAL:
line = EMPTY_BARR.join(line_fragments)
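The patch above pins line termination to CRLF (RFC 5321 § 2.3.8), so a bare LF can no longer terminate a DATA line and smuggle extra content past the server. A minimal sketch of the changed readuntil() behaviour, using only the standard-library asyncio.StreamReader (illustrative; not part of the patched file):

import asyncio

async def demo() -> None:
    reader = asyncio.StreamReader()
    # One "line" below ends in a bare LF instead of CRLF.
    reader.feed_data(b"first\r\nsmuggled\nsecond\r\n.\r\n")
    reader.feed_eof()
    while True:
        try:
            # With the default separator (b"\n"), b"smuggled\n" would come back
            # as its own line; requiring b"\r\n" keeps it attached to the next
            # CRLF-terminated line, which is what the patched DATA loop relies on.
            line = await reader.readuntil(b"\r\n")
        except asyncio.IncompleteReadError:
            break
        print(line)  # b"first\r\n", then b"smuggled\nsecond\r\n", then b".\r\n"

asyncio.run(demo())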
|
# Copyright 2014-2021 The aiosmtpd Developers
# SPDX-License-Identifier: Apache-2.0
import asyncio
import asyncio.sslproto as sslproto
import binascii
import collections
import enum
import inspect
import logging
import re
import socket
import ssl
from base64 import b64decode, b64encode
from email._header_value_parser import get_addr_spec, get_angle_addr
from email.errors import HeaderParseError
from typing import (
Any,
AnyStr,
Awaitable,
Callable,
Dict,
Iterable,
List,
MutableMapping,
NamedTuple,
Optional,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
)
from warnings import warn
import attr
from public import public
from aiosmtpd import __version__, _get_or_new_eventloop
from aiosmtpd.proxy_protocol import ProxyData, get_proxy
# region #### Custom Data Types #######################################################
class _Missing(enum.Enum):
MISSING = object()
class _AuthMechAttr(NamedTuple):
method: "AuthMechanismType"
is_builtin: bool
class _DataState(enum.Enum):
NOMINAL = enum.auto()
TOO_LONG = enum.auto()
TOO_MUCH = enum.auto()
AuthCallbackType = Callable[[str, bytes, bytes], bool]
AuthenticatorType = Callable[["SMTP", "Session", "Envelope", str, Any], "AuthResult"]
AuthMechanismType = Callable[["SMTP", List[str]], Awaitable[Any]]
_TriStateType = Union[None, _Missing, bytes]
RT = TypeVar("RT") # "ReturnType"
DecoratorType = Callable[[Callable[..., RT]], Callable[..., RT]]
# endregion
# region #### Constant & Constant-likes ###############################################
__all__ = [
"AuthCallbackType",
"AuthMechanismType",
"MISSING",
"__version__",
] # Will be added to by @public
__ident__ = 'Python SMTP {}'.format(__version__)
log = logging.getLogger('mail.log')
BOGUS_LIMIT = 5
CALL_LIMIT_DEFAULT = 20
DATA_SIZE_DEFAULT = 2**25 # Where does this number come from, I wonder...
EMPTY_BARR = bytearray()
EMPTYBYTES = b''
MISSING = _Missing.MISSING
NEWLINE = '\n'
VALID_AUTHMECH = re.compile(r"[A-Z0-9_-]+\Z")
# https://tools.ietf.org/html/rfc3207.html#page-3
ALLOWED_BEFORE_STARTTLS = {"NOOP", "EHLO", "STARTTLS", "QUIT"}
# Auth hiding regexes
CLIENT_AUTH_B = re.compile(
# Matches "AUTH" <mechanism> <whitespace_but_not_\r_nor_\n>
br"(?P<authm>\s*AUTH\s+\S+[^\S\r\n]+)"
# Param to AUTH <mechanism>. We only need to sanitize if param is given, which
# for some mechanisms contain sensitive info. If no param is given, then we
# can skip (match fails)
br"(\S+)"
# Optional bCRLF at end. Why optional? Because we also want to sanitize the
# stripped line. If no bCRLF, then this group will be b""
br"(?P<crlf>(?:\r\n)?)", re.IGNORECASE
)
"""Regex that matches 'AUTH <mech> <param>' commend"""
# endregion
@attr.s
class AuthResult:
"""
Contains the result of authentication, to be returned to the smtp_AUTH method.
All initialization arguments _must_ be keyworded!
"""
success: bool = attr.ib(kw_only=True)
"""Indicates authentication is successful or not"""
handled: bool = attr.ib(kw_only=True, default=True)
"""
True means everything (including sending of status code) has been handled by the
AUTH handler and smtp_AUTH should not do anything else.
Applicable only if success == False.
"""
message: Optional[str] = attr.ib(kw_only=True, default=None)
"""
Optional message for additional handling by smtp_AUTH.
Applicable only if handled == False.
"""
auth_data: Optional[Any] = attr.ib(kw_only=True, default=None, repr=lambda x: "...")
"""
Optional free-form authentication data. For the built-in mechanisms, it is usually
an instance of LoginPassword. Other implementations are free to use any data
structure here.
"""
@public
class LoginPassword(NamedTuple):
login: bytes
password: bytes
def __str__(self) -> str:
return f"LoginPassword(login='{self.login.decode()}', password=...)"
def __repr__(self) -> str:
return str(self)
@public
class Session:
def __init__(self, loop: asyncio.AbstractEventLoop):
self.peer: Optional[str] = None
self.ssl: Optional[dict[str, Any]] = None
self.host_name: Optional[str] = None
self.extended_smtp = False
self.loop = loop
self.proxy_data: Optional[ProxyData] = None
"""Data from PROXY Protocol handshake"""
self._login_data = None
self.auth_data = None
"""
New system *optional* authentication data;
can contain anything returned by the authenticator callback.
Can even be None; check `authenticated` attribute to determine
if AUTH successful or not.
"""
self.authenticated: Optional[bool] = None
@property
def login_data(self) -> Any:
"""Legacy login_data, usually containing the username"""
log.warning(
"Session.login_data is deprecated and will be removed in version 2.0"
)
return self._login_data
@login_data.setter
def login_data(self, value: Any) -> None:
log.warning(
"Session.login_data is deprecated and will be removed in version 2.0"
)
self._login_data = value
@public
class Envelope:
def __init__(self) -> None:
self.mail_from: Optional[str] = None
self.mail_options: List[str] = []
self.smtp_utf8 = False
self.content: Union[None, bytes, str] = None
self.original_content: Optional[bytes] = None
self.rcpt_tos: List[str] = []
self.rcpt_options: List[str] = []
# This is here to enable debugging output when the -E option is given to the
# unit test suite. In that case, this function is mocked to set the debug
# level on the loop (as if PYTHONASYNCIODEBUG=1 were set).
def make_loop() -> asyncio.AbstractEventLoop:
return _get_or_new_eventloop()
@public
def syntax(
text: str, extended: Optional[str] = None, when: Optional[str] = None
) -> DecoratorType:
"""
A @decorator that provides helptext for (E)SMTP HELP.
Applies to smtp_* methods only!
:param text: Help text for (E)SMTP HELP
:param extended: Additional text for ESMTP HELP (appended to text)
:param when: The name of the attribute of SMTP class to check; if the value
of the attribute is false-y then HELP will not be available for the command
"""
def decorator(f: Callable[..., RT]) -> Callable[..., RT]:
f.__smtp_syntax__ = text # type: ignore[attr-defined]
f.__smtp_syntax_extended__ = extended # type: ignore[attr-defined]
f.__smtp_syntax_when__ = when # type: ignore[attr-defined]
return f
return decorator
@public
def auth_mechanism(actual_name: str) -> DecoratorType:
"""
A @decorator that explicitly specifies the name of the AUTH mechanism implemented by
the function/method this decorates
:param actual_name: Name of AUTH mechanism. Must consist of [A-Z0-9_-] only.
Will be converted to uppercase
"""
def decorator(f: Callable[..., RT]) -> Callable[..., RT]:
f.__auth_mechanism_name__ = actual_name # type: ignore[attr-defined]
return f
actual_name = actual_name.upper()
if not VALID_AUTHMECH.match(actual_name):
raise ValueError(f"Invalid AUTH mechanism name: {actual_name}")
return decorator
def login_always_fail(
mechanism: str, login: bytes, password: bytes
) -> bool:
return False
def is_int(o: Any) -> bool:
return isinstance(o, int)
@public
class TLSSetupException(Exception):
pass
@public
def sanitize(text: bytes) -> bytes:
m = CLIENT_AUTH_B.match(text)
if m:
return m.group("authm") + b"********" + m.group("crlf")
return text
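# Illustrative behaviour of sanitize(): an AUTH command carrying a parameter has
# the parameter masked, e.g. sanitize(b"AUTH PLAIN dGVzdAB0ZXN0AHRlc3Q=\r\n")
# returns b"AUTH PLAIN ********\r\n", while b"AUTH LOGIN\r\n" (no parameter) does
# not match the regex and is returned unchanged.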
@public
def sanitized_log(func: Callable[..., None], msg: AnyStr, *args, **kwargs) -> None:
"""
Sanitize args before passing to a logging function.
"""
sanitized_args = [
sanitize(a) if isinstance(a, bytes) else a
for a in args
]
func(msg, *sanitized_args, **kwargs)
@public
class SMTP(asyncio.StreamReaderProtocol):
"""
`Documentation can be found here
<https://aiosmtpd.readthedocs.io/en/latest/smtp.html>`_
"""
command_size_limit = 512
command_size_limits: Dict[str, int] = collections.defaultdict(
lambda: SMTP.command_size_limit)
line_length_limit = 1001
"""Maximum line length according to RFC 5321 s 4.5.3.1.6"""
# The number comes from this calculation:
# (RFC 5322 s 2.1.1 + RFC 6532 s 3.4) 998 octets + CRLF = 1000 octets
# (RFC 5321 s 4.5.3.1.6) 1000 octets + "transparent dot" = 1001 octets
local_part_limit: int = 0
"""
Maximum local part length. (RFC 5321 § 4.5.3.1.1 specifies 64, but lenient)
If 0 or Falsey, local part length is unlimited.
"""
AuthLoginUsernameChallenge = "User Name\x00"
AuthLoginPasswordChallenge = "Password\x00"
def __init__(
self,
handler: Any,
*,
data_size_limit: Optional[int] = DATA_SIZE_DEFAULT,
enable_SMTPUTF8: bool = False,
decode_data: bool = False,
hostname: Optional[str] = None,
ident: Optional[str] = None,
tls_context: Optional[ssl.SSLContext] = None,
require_starttls: bool = False,
timeout: float = 300,
auth_required: bool = False,
auth_require_tls: bool = True,
auth_exclude_mechanism: Optional[Iterable[str]] = None,
auth_callback: Optional[AuthCallbackType] = None,
command_call_limit: Union[int, Dict[str, int], None] = None,
authenticator: Optional[AuthenticatorType] = None,
proxy_protocol_timeout: Optional[Union[int, float]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None
):
self.__ident__ = ident or __ident__
self.loop = loop if loop else make_loop()
super().__init__(
asyncio.StreamReader(loop=self.loop, limit=self.line_length_limit),
client_connected_cb=self._cb_client_connected,
loop=self.loop)
self.event_handler = handler
assert data_size_limit is None or isinstance(data_size_limit, int)
self.data_size_limit = data_size_limit
self.enable_SMTPUTF8 = enable_SMTPUTF8
self._decode_data = decode_data
self.command_size_limits.clear()
if hostname:
self.hostname = hostname
else:
self.hostname = socket.getfqdn()
self.tls_context = tls_context
if tls_context:
if (tls_context.verify_mode
not in {ssl.CERT_NONE, ssl.CERT_OPTIONAL}): # noqa: DUO122
log.warning("tls_context.verify_mode not in {CERT_NONE, "
"CERT_OPTIONAL}; this might cause client "
"connection problems")
elif tls_context.check_hostname:
log.warning("tls_context.check_hostname == True; "
"this might cause client connection problems")
self.require_starttls = tls_context and require_starttls
self._timeout_duration = timeout
self._timeout_handle: Optional[asyncio.TimerHandle] = None
self._tls_handshake_okay = True
self._tls_protocol: Optional[sslproto.SSLProtocol] = None
self._original_transport: Optional[asyncio.BaseTransport] = None
self.session: Optional[Session] = None
self.envelope: Optional[Envelope] = None
self.transport: Optional[asyncio.BaseTransport] = None
self._handler_coroutine: Optional[asyncio.Task[None]] = None
if not auth_require_tls and auth_required:
warn("Requiring AUTH while not requiring TLS "
"can lead to security vulnerabilities!")
log.warning("auth_required == True but auth_require_tls == False")
self._auth_require_tls = auth_require_tls
if proxy_protocol_timeout is not None:
if proxy_protocol_timeout <= 0:
raise ValueError("proxy_protocol_timeout must be > 0")
elif proxy_protocol_timeout < 3.0:
log.warning("proxy_protocol_timeout < 3.0")
self._proxy_timeout = proxy_protocol_timeout
self._authenticator: Optional[AuthenticatorType]
self._auth_callback: Optional[AuthCallbackType]
if authenticator is not None:
self._authenticator = authenticator
self._auth_callback = None
else:
self._auth_callback = auth_callback or login_always_fail
self._authenticator = None
self._auth_required = auth_required
# Get hooks & methods to significantly speedup getattr's
self._auth_methods: Dict[str, _AuthMechAttr] = {
getattr(
mfunc, "__auth_mechanism_name__",
mname.replace("auth_", "").replace("__", "-")
): _AuthMechAttr(mfunc, obj is self)
for obj in (self, handler)
for mname, mfunc in inspect.getmembers(obj)
if mname.startswith("auth_")
}
for m in (auth_exclude_mechanism or []):
self._auth_methods.pop(m, None)
log.info(
"Available AUTH mechanisms: "
+ " ".join(
m + "(builtin)" if impl.is_builtin else m
for m, impl in sorted(self._auth_methods.items())
)
)
self._handle_hooks: Dict[str, Callable] = {
m.replace("handle_", ""): getattr(handler, m)
for m in dir(handler)
if m.startswith("handle_")
}
# When we've deprecated the 4-arg form of handle_EHLO,
# we can -- and should -- remove this whole code block
ehlo_hook = self._handle_hooks.get("EHLO")
if ehlo_hook is None:
self._ehlo_hook_ver = None
else:
ehlo_hook_params = inspect.signature(ehlo_hook).parameters
if len(ehlo_hook_params) == 4:
self._ehlo_hook_ver = "old"
warn("Use the 5-argument handle_EHLO() hook instead of "
"the 4-argument handle_EHLO() hook; "
"support for the 4-argument handle_EHLO() hook will be "
"removed in version 2.0",
DeprecationWarning)
elif len(ehlo_hook_params) == 5:
self._ehlo_hook_ver = "new"
else:
raise RuntimeError("Unsupported EHLO Hook")
self._smtp_methods: Dict[str, Any] = {
m.replace("smtp_", ""): getattr(self, m)
for m in dir(self)
if m.startswith("smtp_")
}
self._call_limit_default: int
if command_call_limit is None:
self._enforce_call_limit = False
else:
self._enforce_call_limit = True
if isinstance(command_call_limit, int):
self._call_limit_base = {}
self._call_limit_default = command_call_limit
elif isinstance(command_call_limit, dict):
if not all(map(is_int, command_call_limit.values())):
raise TypeError("All command_call_limit values must be int")
self._call_limit_base = command_call_limit
self._call_limit_default = command_call_limit.get(
"*", CALL_LIMIT_DEFAULT
)
else:
raise TypeError("command_call_limit must be int or Dict[str, int]")
def _create_session(self) -> Session:
return Session(self.loop)
def _create_envelope(self) -> Envelope:
return Envelope()
async def _call_handler_hook(self, command: str, *args) -> Any:
hook = self._handle_hooks.get(command)
if hook is None:
return MISSING
status = await hook(self, self.session, self.envelope, *args)
return status
@property
def max_command_size_limit(self) -> int:
try:
return max(self.command_size_limits.values())
except ValueError:
return self.command_size_limit
def __del__(self): # pragma: nocover
# This is nocover-ed because the contents *totally* does NOT affect function-
# ality, and in addition this comes directly from StreamReaderProtocol.__del__()
# but with a getattr()+check addition to stop the annoying (but harmless)
# "exception ignored" messages caused by AttributeError when self._closed is
# missing (which seems to happen randomly).
closed = getattr(self, "_closed", None)
if closed is None:
return
if closed.done() and not closed.cancelled():
closed.exception()
def connection_made(self, transport: asyncio.BaseTransport) -> None:
# Reset state due to rfc3207 part 4.2.
self._set_rset_state()
self.session = self._create_session()
self.session.peer = transport.get_extra_info('peername')
self._reset_timeout()
seen_starttls = (self._original_transport is not None)
if self.transport is not None and seen_starttls:
# It is STARTTLS connection over normal connection.
self._reader._transport = transport # type: ignore[attr-defined]
self._writer._transport = transport # type: ignore[attr-defined]
self.transport = transport
# Do SSL certificate checking as rfc3207 part 4.1 says. Why is
# _extra a protected attribute?
assert self._tls_protocol is not None
self.session.ssl = self._tls_protocol._extra
hook = self._handle_hooks.get("STARTTLS")
if hook is None:
self._tls_handshake_okay = True
else:
self._tls_handshake_okay = hook(
self, self.session, self.envelope)
else:
super().connection_made(transport)
self.transport = transport
log.info('Peer: %r', self.session.peer)
# Process the client's requests.
self._handler_coroutine = self.loop.create_task(
self._handle_client())
def connection_lost(self, error: Optional[Exception]) -> None:
assert self.session is not None
log.info('%r connection lost', self.session.peer)
assert self._timeout_handle is not None
self._timeout_handle.cancel()
# If STARTTLS was issued, then our transport is the SSL protocol
# transport, and we need to close the original transport explicitly,
# otherwise an unexpected eof_received() will be called *after* the
# connection_lost(). At that point the stream reader will already be
# destroyed and we'll get a traceback in super().eof_received() below.
if self._original_transport is not None:
self._original_transport.close()
super().connection_lost(error)
assert self._handler_coroutine is not None
self._handler_coroutine.cancel()
self.transport = None
def eof_received(self) -> Optional[bool]:
assert self.session is not None
log.info('%r EOF received', self.session.peer)
assert self._handler_coroutine is not None
self._handler_coroutine.cancel()
if self.session.ssl is not None:
# If STARTTLS was issued, return False, because True has no effect
# on an SSL transport and raises a warning. Our superclass has no
# way of knowing we switched to SSL so it might return True.
return False
return super().eof_received()
def _reset_timeout(self, duration: Optional[float] = None) -> None:
if self._timeout_handle is not None:
self._timeout_handle.cancel()
self._timeout_handle = self.loop.call_later(
duration or self._timeout_duration, self._timeout_cb
)
def _timeout_cb(self):
assert self.session is not None
log.info('%r connection timeout', self.session.peer)
# Calling close() on the transport will trigger connection_lost(),
# which gracefully closes the SSL transport if required and cleans
# up state.
assert self.transport is not None
self.transport.close()
def _cb_client_connected(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
):
# This is redundant since we subclass StreamReaderProtocol, but I like
# the shorter names.
self._reader = reader
self._writer = writer
def _set_post_data_state(self):
"""Reset state variables to their post-DATA state."""
self.envelope = self._create_envelope()
def _set_rset_state(self):
"""Reset all state variables except the greeting."""
self._set_post_data_state()
async def push(self, status: AnyStr):
if isinstance(status, str):
response = bytes(
status, 'utf-8' if self.enable_SMTPUTF8 else 'ascii')
else:
response = status
assert isinstance(response, bytes)
self._writer.write(response + b"\r\n")
assert self.session is not None
log.debug("%r << %r", self.session.peer, response)
await self._writer.drain()
async def handle_exception(self, error: Exception) -> str:
if hasattr(self.event_handler, 'handle_exception'):
status = await self.event_handler.handle_exception(error)
return status
else:
assert self.session is not None
log.exception('%r SMTP session exception', self.session.peer)
status = '500 Error: ({}) {}'.format(
error.__class__.__name__, str(error))
return status
async def _handle_client(self) -> None:
assert self.session is not None
log.info('%r handling connection', self.session.peer)
if self._proxy_timeout is not None:
self._reset_timeout(self._proxy_timeout)
log.debug("%r waiting PROXY handshake", self.session.peer)
self.session.proxy_data = await get_proxy(self._reader)
if self.session.proxy_data:
log.info("%r valid PROXY handshake", self.session.peer)
status = await self._call_handler_hook("PROXY", self.session.proxy_data)
log.debug("%r handle_PROXY returned %r", self.session.peer, status)
else:
log.warning("%r invalid PROXY handshake", self.session.peer)
status = False
if status is MISSING or not status:
log.info("%r rejected by handle_PROXY", self.session.peer)
assert self.transport is not None
self.transport.close()
return
self._reset_timeout()
await self.push('220 {} {}'.format(self.hostname, self.__ident__))
if self._enforce_call_limit:
call_limit: MutableMapping[str, int] = collections.defaultdict(
lambda: self._call_limit_default,
self._call_limit_base
)
else:
# Not used, but this silences code inspection tools
call_limit = {}
bogus_budget = BOGUS_LIMIT
while self.transport is not None: # pragma: nobranch
try:
try:
line: bytes = await self._reader.readuntil()
except asyncio.LimitOverrunError as error:
# Line too long. Read until end of line before sending 500.
await self._reader.read(error.consumed)
while True:
try:
await self._reader.readuntil()
break
except asyncio.LimitOverrunError as e:
# Line is even longer...
await self._reader.read(e.consumed)
continue
# Now that we have read a full line from the client,
# send error response and read the next command line.
await self.push('500 Command line too long')
continue
sanitized_log(log.debug, '_handle_client readline: %r', line)
# XXX this rstrip may not completely preserve old behavior.
line = line.rstrip(b'\r\n')
sanitized_log(log.info, '%r >> %r', self.session.peer, line)
if not line:
await self.push('500 Error: bad syntax')
continue
command_bytes, _, arg_bytes = line.partition(b" ")
# Decode to string only the command name part, which must be
# ASCII as per RFC. If there is an argument, it is decoded to
# UTF-8/surrogateescape so that non-UTF-8 data can be
# re-encoded back to the original bytes when the SMTP command
# is handled.
try:
command = command_bytes.upper().decode(encoding='ascii')
except UnicodeDecodeError:
await self.push('500 Error: bad syntax')
continue
if not arg_bytes:
arg: Optional[str] = None
else:
arg_bytes = arg_bytes.strip()
# Remote SMTP servers can send us UTF-8 content despite
# whether they've declared to do so or not. Some old
# servers can send 8-bit data. Use surrogateescape so
# that the fidelity of the decoding is preserved, and the
# original bytes can be retrieved.
if self.enable_SMTPUTF8:
arg = str(
arg_bytes, encoding='utf-8', errors='surrogateescape')
else:
try:
arg = str(arg_bytes, encoding='ascii', errors='strict')
except UnicodeDecodeError:
# This happens if enable_SMTPUTF8 is false, meaning
# that the server explicitly does not want to
# accept non-ASCII, but the client ignores that and
# sends non-ASCII anyway.
await self.push('500 Error: strict ASCII mode')
# Should we await self.handle_exception()?
continue
max_sz = (
self.command_size_limits[command]
if self.session.extended_smtp
else self.command_size_limit
)
if len(line) > max_sz:
await self.push('500 Command line too long')
continue
if not self._tls_handshake_okay and command != 'QUIT':
await self.push(
'554 Command refused due to lack of security')
continue
if (self.require_starttls
and not self._tls_protocol
and command not in ALLOWED_BEFORE_STARTTLS):
# RFC3207 part 4
await self.push('530 Must issue a STARTTLS command first')
continue
if self._enforce_call_limit:
budget = call_limit[command]
if budget < 1:
log.warning(
"%r over limit for %s", self.session.peer, command
)
await self.push(
f"421 4.7.0 {command} sent too many times"
)
self.transport.close()
continue
call_limit[command] = budget - 1
method = self._smtp_methods.get(command)
if method is None:
log.warning("%r unrecognised: %s", self.session.peer, command)
bogus_budget -= 1
if bogus_budget < 1:
log.warning("%r too many bogus commands", self.session.peer)
await self.push(
"502 5.5.1 Too many unrecognized commands, goodbye."
)
self.transport.close()
continue
await self.push(
f'500 Error: command "{command}" not recognized'
)
continue
# Received a valid command, reset the timer.
self._reset_timeout()
await method(arg)
except asyncio.CancelledError:
# The connection got reset during the DATA command.
# XXX If handler method raises ConnectionResetError, we should
# verify that it was actually self._reader that was reset.
log.info('%r Connection lost during _handle_client()',
self.session.peer)
self._writer.close()
raise
except ConnectionResetError:
log.info('%r Connection lost during _handle_client()',
self.session.peer)
self._writer.close()
raise
except Exception as error:
status = None
try:
status = await self.handle_exception(error)
except Exception as inner_error:
try:
log.exception('%r Exception in handle_exception()',
self.session.peer)
status = '500 Error: ({}) {}'.format(
inner_error.__class__.__name__, str(inner_error))
except Exception:
status = '500 Error: Cannot describe error'
finally:
if isinstance(error, TLSSetupException):
# This code branch is inside None check for self.transport
# so there shouldn't be a None self.transport but pytype
# still complains, so silence that error.
self.transport.close() # pytype: disable=attribute-error
self.connection_lost(error)
else:
# The value of status is being set with ex-except and it
# shouldn't be None, but pytype isn't able to infer that
# so ignore the error related to wrong argument types.
await self.push(status) # pytype: disable=wrong-arg-types
async def check_helo_needed(self, helo: str = "HELO") -> bool:
"""
Check if HELO/EHLO is needed.
:param helo: The actual string of HELO/EHLO
:return: True if HELO/EHLO is needed
"""
assert self.session is not None
if not self.session.host_name:
await self.push(f'503 Error: send {helo} first')
return True
return False
async def check_auth_needed(self, caller_method: str) -> bool:
"""
Check if AUTH is needed.
:param caller_method: The SMTP method needing a check (for logging)
:return: True if AUTH is needed
"""
assert self.session is not None
if self._auth_required and not self.session.authenticated:
log.info(f'{caller_method}: Authentication required')
await self.push('530 5.7.0 Authentication required')
return True
return False
# SMTP and ESMTP commands
@syntax('HELO hostname')
async def smtp_HELO(self, hostname: str):
if not hostname:
await self.push('501 Syntax: HELO hostname')
return
self._set_rset_state()
assert self.session is not None
self.session.extended_smtp = False
status = await self._call_handler_hook('HELO', hostname)
if status is MISSING:
self.session.host_name = hostname
status = '250 {}'.format(self.hostname)
await self.push(status)
@syntax('EHLO hostname')
async def smtp_EHLO(self, hostname: str):
if not hostname:
await self.push('501 Syntax: EHLO hostname')
return
response = ['250-' + self.hostname, ]
self._set_rset_state()
assert self.session is not None
self.session.extended_smtp = True
if self.data_size_limit:
response.append(f'250-SIZE {self.data_size_limit}')
self.command_size_limits['MAIL'] += 26
if not self._decode_data:
response.append('250-8BITMIME')
if self.enable_SMTPUTF8:
response.append('250-SMTPUTF8')
self.command_size_limits['MAIL'] += 10
if self.tls_context and not self._tls_protocol:
response.append('250-STARTTLS')
if not self._auth_require_tls or self._tls_protocol:
response.append(
"250-AUTH " + " ".join(sorted(self._auth_methods.keys()))
)
if hasattr(self, 'ehlo_hook'):
warn('Use handler.handle_EHLO() instead of .ehlo_hook()',
DeprecationWarning)
await self.ehlo_hook()
if self._ehlo_hook_ver is None:
self.session.host_name = hostname
response.append('250 HELP')
elif self._ehlo_hook_ver == "old":
# Old behavior: Send all responses first...
for r in response:
await self.push(r)
# ... then send the response from the hook.
response = [await self._call_handler_hook("EHLO", hostname)]
# (The hook might internally send its own responses.)
elif self._ehlo_hook_ver == "new": # pragma: nobranch
# New behavior: hand over list of responses so far to the hook, and
# REPLACE existing list of responses with what the hook returns.
# We will handle the push()ing
response.append('250 HELP')
response = await self._call_handler_hook("EHLO", hostname, response)
for r in response:
await self.push(r)
@syntax('NOOP [ignored]')
async def smtp_NOOP(self, arg: str):
status = await self._call_handler_hook('NOOP', arg)
await self.push('250 OK' if status is MISSING else status)
@syntax('QUIT')
async def smtp_QUIT(self, arg: str):
if arg:
await self.push('501 Syntax: QUIT')
else:
status = await self._call_handler_hook('QUIT')
await self.push('221 Bye' if status is MISSING else status)
assert self._handler_coroutine is not None
self._handler_coroutine.cancel()
assert self.transport is not None
self.transport.close()
@syntax('STARTTLS', when='tls_context')
async def smtp_STARTTLS(self, arg: str):
if arg:
await self.push('501 Syntax: STARTTLS')
return
if not self.tls_context:
await self.push('454 TLS not available')
return
await self.push('220 Ready to start TLS')
# Create a waiter Future to wait for SSL handshake to complete
waiter = self.loop.create_future()
# Create SSL layer.
# noinspection PyTypeChecker
self._tls_protocol = sslproto.SSLProtocol(
self.loop,
self,
self.tls_context,
waiter,
server_side=True)
# Reconfigure transport layer. Keep a reference to the original
# transport so that we can close it explicitly when the connection is
# lost.
self._original_transport = self.transport
assert self._original_transport is not None
self._original_transport.set_protocol(self._tls_protocol)
# Reconfigure the protocol layer. Why is the app transport a protected
# property, if it MUST be used externally?
self.transport = self._tls_protocol._app_transport
self._tls_protocol.connection_made(self._original_transport)
# wait until handshake complete
try:
await waiter
except asyncio.CancelledError:
raise
except Exception as error:
raise TLSSetupException() from error
@syntax("AUTH <mechanism>")
async def smtp_AUTH(self, arg: str) -> None:
if await self.check_helo_needed("EHLO"):
return
assert self.session is not None
if not self.session.extended_smtp:
await self.push("500 Error: command 'AUTH' not recognized")
return
elif self._auth_require_tls and not self._tls_protocol:
await self.push("538 5.7.11 Encryption required for requested "
"authentication mechanism")
return
elif self.session.authenticated:
await self.push('503 Already authenticated')
return
elif not arg:
await self.push('501 Not enough value')
return
args = arg.split()
if len(args) > 2:
await self.push('501 Too many values')
return
mechanism = args[0]
if mechanism not in self._auth_methods:
await self.push('504 5.5.4 Unrecognized authentication type')
return
CODE_SUCCESS = "235 2.7.0 Authentication successful"
CODE_INVALID = "535 5.7.8 Authentication credentials invalid"
status = await self._call_handler_hook('AUTH', args)
if status is MISSING:
auth_method = self._auth_methods[mechanism]
log.debug(
"Using %s auth_ hook for %r",
"builtin" if auth_method.is_builtin else "handler",
mechanism
)
# Pass 'self' to method so external methods can leverage this
# class's helper methods such as push()
auth_result = await auth_method.method(self, args)
log.debug("auth_%s returned %r", mechanism, auth_result)
# New system using `authenticator` and AuthResult
if isinstance(auth_result, AuthResult):
if auth_result.success:
self.session.authenticated = True
_auth_data = auth_result.auth_data
self.session.auth_data = _auth_data
# Custom mechanisms might not implement the "login" attribute, and
# that's okay.
self.session.login_data = getattr(_auth_data, "login", None)
status = auth_result.message or CODE_SUCCESS
else:
if auth_result.handled:
status = None
elif auth_result.message:
status = auth_result.message
else:
status = CODE_INVALID
# Old system using `auth_callback` and _TriState
elif auth_result is None:
# None means there's an error already handled by method and
# we don't need to do anything more
status = None
elif auth_result is MISSING or auth_result is False:
# MISSING means no error in AUTH process, but credentials
# is rejected / not valid
status = CODE_INVALID
else:
self.session.login_data = auth_result
status = CODE_SUCCESS
if status is not None: # pragma: no branch
await self.push(status)
async def challenge_auth(
self,
challenge: Union[str, bytes],
encode_to_b64: bool = True,
log_client_response: bool = False,
) -> Union[_Missing, bytes]:
"""
Send challenge during authentication. "334 " will be prefixed, so do NOT
put "334 " at start of server_message.
:param challenge: Challenge to send to client. If str, will be utf8-encoded.
:param encode_to_b64: If true, then perform Base64 encoding on challenge
:param log_client_response: Perform logging of client's response.
WARNING: Might cause leak of sensitive information! Do not turn on
unless _absolutely_ necessary!
:return: Response from client, or MISSING
"""
challenge = (
challenge.encode() if isinstance(challenge, str) else challenge
)
assert isinstance(challenge, bytes)
# Trailing space is MANDATORY even if challenge is empty.
# See:
# - https://tools.ietf.org/html/rfc4954#page-4 ¶ 5
# - https://tools.ietf.org/html/rfc4954#page-13 "continue-req"
challenge = b"334 " + (b64encode(challenge) if encode_to_b64 else challenge)
assert self.session is not None
log.debug("%r << challenge: %r", self.session.peer, challenge)
await self.push(challenge)
line = await self._reader.readline() # pytype: disable=attribute-error
if log_client_response:
warn("AUTH interaction logging is enabled!")
warn("Sensitive information might be leaked!")
log.debug("%r >> %r", self.session.peer, line)
blob: bytes = line.strip()
# '*' handling in accordance with RFC4954
if blob == b"*":
log.warning("%r aborted AUTH with '*'", self.session.peer)
await self.push("501 5.7.0 Auth aborted")
return MISSING
try:
decoded_blob = b64decode(blob, validate=True)
except binascii.Error:
log.debug("%r can't decode base64: %s", self.session.peer, blob)
await self.push("501 5.5.2 Can't decode base64")
return MISSING
return decoded_blob
_334_PREFIX = re.compile(r"^334 ")
async def _auth_interact(
self,
server_message: str
) -> Union[_Missing, bytes]: # pragma: nocover
warn(
"_auth_interact will be deprecated in version 2.0. "
"Please use challenge_auth() instead.",
DeprecationWarning
)
return await self.challenge_auth(
challenge=self._334_PREFIX.sub("", server_message),
encode_to_b64=False,
)
def _authenticate(self, mechanism: str, auth_data: Any) -> AuthResult:
if self._authenticator is not None:
# self.envelope is likely still empty, but we'll pass it anyways to
# make the invocation similar to the one in _call_handler_hook
assert self.session is not None
assert self.envelope is not None
return self._authenticator(
self, self.session, self.envelope, mechanism, auth_data
)
else:
assert self._auth_callback is not None
assert isinstance(auth_data, LoginPassword)
if self._auth_callback(mechanism, *auth_data):
return AuthResult(success=True, handled=True, auth_data=auth_data)
else:
return AuthResult(success=False, handled=False)
# IMPORTANT NOTES FOR THE auth_* METHODS
# ======================================
# Please note that there are two systems for return values in #2.
#
# 1. For internal methods, due to how they are called, we must ignore the first arg
# 2. (OLD SYSTEM) All auth_* methods can return one of three values:
# - None: An error happened and handled;
# smtp_AUTH should do nothing more
# - MISSING or False: Authentication failed, but not because of error
# - [Any]: Authentication succeeded and this is the 'identity' of
# the SMTP user
# - 'identity' is not always username, depending on the auth mecha-
# nism. Might be a session key, a one-time user ID, or any kind of
# object, actually.
# 2. (NEW SYSTEM) All auth_* methods must return an AuthResult object.
# For explanation on the object's attributes,
# see the AuthResult class definition.
# 3. Auth credentials checking is performed in the auth_* methods because
# more advanced auth mechanism might not return login+password pair
# (see #2 above)
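# Illustrative NEW-SYSTEM mechanism provided by a handler (hypothetical name,
# challenge, and secret), showing the expected shape and return value:
#
#     async def auth_DEMO(self, server, args):
#         response = await server.challenge_auth("Secret\x00")
#         if response is MISSING:
#             return AuthResult(success=False)
#         if response == b"hunter2":
#             return AuthResult(success=True, auth_data=response)
#         return AuthResult(success=False, handled=False)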
async def auth_PLAIN(self, _, args: List[str]) -> AuthResult:
login_and_password: _TriStateType
if len(args) == 1:
login_and_password = await self.challenge_auth("")
if login_and_password is MISSING:
return AuthResult(success=False)
else:
try:
login_and_password = b64decode(args[1].encode(), validate=True)
except Exception:
await self.push("501 5.5.2 Can't decode base64")
return AuthResult(success=False, handled=True)
try:
# login data is "{authz_id}\x00{login_id}\x00{password}"
# authz_id can be null, and currently ignored
# See https://tools.ietf.org/html/rfc4616#page-3
_, login, password = login_and_password.split(b"\x00") # noqa: E501
except ValueError: # not enough args
await self.push("501 5.5.2 Can't split auth value")
return AuthResult(success=False, handled=True)
# Verify login data
assert login is not None
assert password is not None
return self._authenticate("PLAIN", LoginPassword(login, password))
async def auth_LOGIN(self, _, args: List[str]) -> AuthResult:
login: _TriStateType
if len(args) == 1:
# Client sent only "AUTH LOGIN"
login = await self.challenge_auth(self.AuthLoginUsernameChallenge)
if login is MISSING:
return AuthResult(success=False)
else:
# Client sent "AUTH LOGIN <b64-encoded-username>"
try:
login = b64decode(args[1].encode(), validate=True)
except Exception:
await self.push("501 5.5.2 Can't decode base64")
return AuthResult(success=False, handled=True)
assert login is not None
password: _TriStateType
password = await self.challenge_auth(self.AuthLoginPasswordChallenge)
if password is MISSING:
return AuthResult(success=False)
assert password is not None
return self._authenticate("LOGIN", LoginPassword(login, password))
def _strip_command_keyword(self, keyword: str, arg: str) -> Optional[str]:
keylen = len(keyword)
if arg[:keylen].upper() == keyword:
return arg[keylen:].strip()
return None
def _getaddr(self, arg: str) -> Tuple[Optional[str], Optional[str]]:
"""
Try to parse address given in SMTP command.
Returns address=None if arg can't be parsed properly (get_angle_addr /
get_addr_spec raised HeaderParseError)
"""
class AddrSpec(Protocol):
@property
def addr_spec(self) -> str:
...
if not arg:
return '', ''
address: AddrSpec
try:
if arg.lstrip().startswith('<'):
address, rest = get_angle_addr(arg)
else:
address, rest = get_addr_spec(arg)
except HeaderParseError:
return None, None
addr = address.addr_spec
localpart, atsign, domainpart = addr.rpartition("@")
if self.local_part_limit and len(localpart) > self.local_part_limit:
return None, None
return addr, rest
def _getparams(
self, params: Sequence[str]
) -> Optional[Dict[str, Union[str, bool]]]:
# Return params as dictionary. Return None if not all parameters
# appear to be syntactically valid according to RFC 1869.
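# For example, ["SIZE=1000", "SMTPUTF8"] becomes {"SIZE": "1000", "SMTPUTF8": True},
# while ["SIZE="] (an "=" with no value) makes the whole call return None.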
result: Dict[str, Union[str, bool]] = {}
for param in params:
param, eq, value = param.partition('=')
if not param.isalnum() or eq and not value:
return None
result[param] = value if eq else True
return result
# noinspection PyUnresolvedReferences
def _syntax_available(self, method: Callable) -> bool:
if not hasattr(method, '__smtp_syntax__'):
return False
if method.__smtp_syntax_when__: # type: ignore[attr-defined]
return bool(getattr(self, method.__smtp_syntax_when__)) # type: ignore[attr-defined]
return True
@syntax('HELP [command]')
async def smtp_HELP(self, arg: str) -> None:
if await self.check_auth_needed("HELP"):
return
code = 250
if arg:
method = self._smtp_methods.get(arg.upper())
if method and self._syntax_available(method):
help_str = method.__smtp_syntax__
assert self.session is not None
if (self.session.extended_smtp
and method.__smtp_syntax_extended__):
help_str += method.__smtp_syntax_extended__
await self.push('250 Syntax: ' + help_str)
return
code = 501
commands = []
for name, method in self._smtp_methods.items():
if self._syntax_available(method):
commands.append(name)
commands.sort()
await self.push(
'{} Supported commands: {}'.format(code, ' '.join(commands)))
@syntax('VRFY <address>')
async def smtp_VRFY(self, arg: str) -> None:
if await self.check_auth_needed("VRFY"):
return
if arg:
address, params = self._getaddr(arg)
if address is None:
await self.push('502 Could not VRFY ' + arg)
else:
status = await self._call_handler_hook('VRFY', address)
await self.push(
'252 Cannot VRFY user, but will accept message '
'and attempt delivery'
if status is MISSING else status)
else:
await self.push('501 Syntax: VRFY <address>')
@syntax('MAIL FROM: <address>', extended=' [SP <mail-parameters>]')
async def smtp_MAIL(self, arg: Optional[str]) -> None:
if await self.check_helo_needed():
return
if await self.check_auth_needed("MAIL"):
return
syntaxerr = '501 Syntax: MAIL FROM: <address>'
assert self.session is not None
if self.session.extended_smtp:
syntaxerr += ' [SP <mail-parameters>]'
if arg is None:
await self.push(syntaxerr)
return
arg = self._strip_command_keyword('FROM:', arg)
if arg is None:
await self.push(syntaxerr)
return
address, addrparams = self._getaddr(arg)
if address is None:
await self.push("553 5.1.3 Error: malformed address")
return
if not address:
await self.push(syntaxerr)
return
if not self.session.extended_smtp and addrparams:
await self.push(syntaxerr)
return
assert self.envelope is not None
if self.envelope.mail_from:
await self.push('503 Error: nested MAIL command')
return
assert addrparams is not None
mail_options = addrparams.upper().split()
params = self._getparams(mail_options)
if params is None:
await self.push(syntaxerr)
return
if not self._decode_data:
body = params.pop('BODY', '7BIT')
if body not in ['7BIT', '8BITMIME']:
await self.push(
'501 Error: BODY can only be one of 7BIT, 8BITMIME')
return
smtputf8 = params.pop('SMTPUTF8', False)
if not isinstance(smtputf8, bool):
await self.push('501 Error: SMTPUTF8 takes no arguments')
return
if smtputf8 and not self.enable_SMTPUTF8:
await self.push('501 Error: SMTPUTF8 disabled')
return
self.envelope.smtp_utf8 = smtputf8
size = params.pop('SIZE', None)
if size:
if isinstance(size, bool) or not size.isdigit():
await self.push(syntaxerr)
return
elif self.data_size_limit and int(size) > self.data_size_limit:
await self.push(
'552 Error: message size exceeds fixed maximum message '
'size')
return
if len(params) > 0:
await self.push(
'555 MAIL FROM parameters not recognized or not implemented')
return
status = await self._call_handler_hook('MAIL', address, mail_options)
if status is MISSING:
self.envelope.mail_from = address
self.envelope.mail_options.extend(mail_options)
status = '250 OK'
log.info('%r sender: %s', self.session.peer, address)
await self.push(status)
@syntax('RCPT TO: <address>', extended=' [SP <mail-parameters>]')
async def smtp_RCPT(self, arg: Optional[str]) -> None:
if await self.check_helo_needed():
return
if await self.check_auth_needed("RCPT"):
return
assert self.envelope is not None
if not self.envelope.mail_from:
await self.push("503 Error: need MAIL command")
return
syntaxerr = '501 Syntax: RCPT TO: <address>'
assert self.session is not None
if self.session.extended_smtp:
syntaxerr += ' [SP <mail-parameters>]'
if arg is None:
await self.push(syntaxerr)
return
arg = self._strip_command_keyword('TO:', arg)
if arg is None:
await self.push(syntaxerr)
return
address, params = self._getaddr(arg)
if address is None:
await self.push("553 5.1.3 Error: malformed address")
return
if not address:
await self.push(syntaxerr)
return
if not self.session.extended_smtp and params:
await self.push(syntaxerr)
return
assert params is not None
rcpt_options = params.upper().split()
params_dict = self._getparams(rcpt_options)
if params_dict is None:
await self.push(syntaxerr)
return
# XXX currently there are no options we recognize.
if len(params_dict) > 0:
await self.push(
'555 RCPT TO parameters not recognized or not implemented'
)
return
status = await self._call_handler_hook('RCPT', address, rcpt_options)
if status is MISSING:
self.envelope.rcpt_tos.append(address)
self.envelope.rcpt_options.extend(rcpt_options)
status = '250 OK'
log.info('%r recip: %s', self.session.peer, address)
await self.push(status)
@syntax('RSET')
async def smtp_RSET(self, arg: str):
if arg:
await self.push('501 Syntax: RSET')
return
self._set_rset_state()
if hasattr(self, 'rset_hook'):
warn('Use handler.handle_RSET() instead of .rset_hook()',
DeprecationWarning)
await self.rset_hook()
status = await self._call_handler_hook('RSET')
await self.push('250 OK' if status is MISSING else status)
@syntax('DATA')
async def smtp_DATA(self, arg: str) -> None:
if await self.check_helo_needed():
return
if await self.check_auth_needed("DATA"):
return
assert self.envelope is not None
if not self.envelope.rcpt_tos:
await self.push('503 Error: need RCPT command')
return
if arg:
await self.push('501 Syntax: DATA')
return
await self.push('354 End data with <CR><LF>.<CR><LF>')
data: List[bytearray] = []
num_bytes: int = 0
limit: Optional[int] = self.data_size_limit
line_fragments: List[bytes] = []
state: _DataState = _DataState.NOMINAL
while self.transport is not None: # pragma: nobranch
# Since eof_received cancels this coroutine,
# readuntil() can never raise asyncio.IncompleteReadError.
try:
line: bytes = await self._reader.readuntil()
log.debug('DATA readline: %s', line)
assert line.endswith(b'\n')
except asyncio.CancelledError:
# The connection got reset during the DATA command.
log.info('Connection lost during DATA')
self._writer.close()
raise
except asyncio.LimitOverrunError as e:
# The line exceeds StreamReader's "stream limit".
# Delay SMTP Status Code sending until data receive is complete
# This seems to be implied in RFC 5321 § 4.2.5
if state == _DataState.NOMINAL:
# Transition to TOO_LONG only if we haven't gone TOO_MUCH yet
state = _DataState.TOO_LONG
# Discard data immediately to prevent memory pressure
data *= 0
# Drain the stream anyways
line = await self._reader.read(e.consumed)
assert not line.endswith(b'\n')
# A lone dot in a line signals the end of DATA.
if not line_fragments and line == b'.\r\n':
break
num_bytes += len(line)
if state == _DataState.NOMINAL and limit and num_bytes > limit:
# Delay SMTP Status Code sending until data receive is complete
# This seems to be implied in RFC 5321 § 4.2.5
state = _DataState.TOO_MUCH
# Discard data immediately to prevent memory pressure
data *= 0
line_fragments.append(line)
if line.endswith(b'\n'):
# Record data only if state is "NOMINAL"
if state == _DataState.NOMINAL:
line = EMPTY_BARR.join(line_fragments)
if len(line) > self.line_length_limit:
# Theoretically we shouldn't reach this place. But it's always
# good to practice DEFENSIVE coding.
state = _DataState.TOO_LONG
# Discard data immediately to prevent memory pressure
data *= 0
else:
data.append(EMPTY_BARR.join(line_fragments))
line_fragments *= 0
# Day of reckoning! Let's take care of those out-of-nominal situations
if state != _DataState.NOMINAL:
if state == _DataState.TOO_LONG:
await self.push("500 Line too long (see RFC5321 4.5.3.1.6)")
elif state == _DataState.TOO_MUCH: # pragma: nobranch
await self.push('552 Error: Too much mail data')
self._set_post_data_state()
return
# If unfinished_line is non-empty, then the connection was closed.
assert not line_fragments
# Remove extraneous carriage returns and de-transparency
# according to RFC 5321, Section 4.5.2.
for text in data:
if text.startswith(b'.'):
del text[0]
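# Dot de-transparency example: a body line that really starts with "." arrives
# dot-stuffed as b"..like this\r\n"; deleting its first octet above restores
# b".like this\r\n".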
original_content: bytes = EMPTYBYTES.join(data)
# Discard data immediately to prevent memory pressure
data *= 0
content: Union[str, bytes]
if self._decode_data:
if self.enable_SMTPUTF8:
content = original_content.decode('utf-8', errors='surrogateescape')
else:
try:
content = original_content.decode('ascii', errors='strict')
except UnicodeDecodeError:
# This happens if enable_smtputf8 is false, meaning that
# the server explicitly does not want to accept non-ascii,
# but the client ignores that and sends non-ascii anyway.
await self.push('500 Error: strict ASCII mode')
return
else:
content = original_content
self.envelope.content = content
self.envelope.original_content = original_content
# Call the new API first if it's implemented.
if "DATA" in self._handle_hooks:
status = await self._call_handler_hook('DATA')
else:
# Backward compatibility.
status = MISSING
if hasattr(self.event_handler, 'process_message'):
warn('Use handler.handle_DATA() instead of .process_message()',
DeprecationWarning)
assert self.session is not None
args = (self.session.peer, self.envelope.mail_from,
self.envelope.rcpt_tos, self.envelope.content)
if asyncio.iscoroutinefunction(
self.event_handler.process_message):
status = await self.event_handler.process_message(*args)
else:
status = self.event_handler.process_message(*args)
# The deprecated API can return None which means, return the
# default status. Don't worry about coverage for this case as
# it's a deprecated API that will go away after 1.0.
if status is None: # pragma: nocover
status = MISSING
self._set_post_data_state()
await self.push('250 OK' if status is MISSING else status)
# Commands that have not been implemented.
async def smtp_EXPN(self, arg: str):
await self.push('502 EXPN not implemented')
|
PYSEC-2024-221
|
arches/app/models/concept.py
|
@@ -32,6 +32,8 @@
from django.utils.translation import ugettext as _
from django.utils.translation import get_language
from django.db import IntegrityError
+from psycopg2.extensions import AsIs
+
import logging
@@ -505,13 +507,12 @@ def get_child_edges(
except:
return []
- languageid = get_language() if languageid is None else languageid
+ # this interpolation is safe because `relationtypes` is hardcoded in all calls, and not accessible via the API
relationtypes = " or ".join(["r.relationtype = '%s'" % (relationtype) for relationtype in relationtypes])
- depth_limit = "and depth < %s" % depth_limit if depth_limit else ""
- child_valuetypes = ("','").join(
- child_valuetypes if child_valuetypes else models.DValueType.objects.filter(category="label").values_list("valuetype", flat=True)
- )
- limit_clause = " limit %s offset %s" % (limit, offset) if offset is not None else ""
+ offset_clause = " limit %(limit)s offset %(offset)s" if offset else ""
+ depth_clause = " and depth < %(depth_limit)s" if depth_limit else ""
+
+ cursor = connection.cursor()
if order_hierarchically:
sql = """
@@ -525,9 +526,9 @@ def get_child_edges(
WHERE conceptid=r.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
- CASE WHEN languageid = '{languageid}' THEN 10
- WHEN languageid like '{short_languageid}%' THEN 5
- WHEN languageid like '{default_languageid}%' THEN 2
+ CASE WHEN languageid = %(languageid)s THEN 10
+ WHEN languageid like %(short_languageid)s THEN 5
+ WHEN languageid like %(default_languageid)s THEN 2
ELSE 0
END
) desc limit 1
@@ -547,8 +548,8 @@ def get_child_edges(
limit 1
) as collector
FROM relations r
- WHERE r.conceptidfrom = '{conceptid}'
- and ({relationtypes})
+ WHERE r.conceptidfrom = %(conceptid)s
+ and (%(relationtypes)s)
ORDER BY sortorder, valuesto
)
UNION
@@ -559,9 +560,9 @@ def get_child_edges(
WHERE conceptid=r.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
- CASE WHEN languageid = '{languageid}' THEN 10
- WHEN languageid like '{short_languageid}%' THEN 5
- WHEN languageid like '{default_languageid}%' THEN 2
+ CASE WHEN languageid = %(languageid)s THEN 10
+ WHEN languageid like %(short_languageid)s THEN 5
+ WHEN languageid like %(default_languageid)s THEN 2
ELSE 0
END
) desc limit 1
@@ -582,7 +583,7 @@ def get_child_edges(
) as collector
FROM relations r
JOIN ordered_relationships b ON(b.conceptidto = r.conceptidfrom)
- WHERE ({relationtypes})
+ WHERE (%(relationtypes)s)
ORDER BY sortorder, valuesto
)
),
@@ -593,17 +594,17 @@ def get_child_edges(
r.collector,
1 AS depth ---|NonRecursive Part
FROM ordered_relationships r
- WHERE r.conceptidfrom = '{conceptid}'
- and ({relationtypes})
+ WHERE r.conceptidfrom = %(conceptid)s
+ and (%(relationtypes)s)
UNION
SELECT r.conceptidfrom, r.conceptidto,
row || '-' || to_char(row_number() OVER (), 'fm000000'),
r.collector,
depth+1 ---|RecursivePart
FROM ordered_relationships r
JOIN children b ON(b.conceptidto = r.conceptidfrom)
- WHERE ({relationtypes})
- {depth_limit}
+ WHERE (%(relationtypes)s)
+ {depth_clause}
)
{subquery}
@@ -614,70 +615,73 @@ def get_child_edges(
FROM (
SELECT *
FROM values
- WHERE conceptid={recursive_table}.conceptidto
+ WHERE conceptid=%(recursive_table)s.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
- CASE WHEN languageid = '{languageid}' THEN 10
- WHEN languageid like '{short_languageid}%' THEN 5
- WHEN languageid like '{default_languageid}%' THEN 2
+ CASE WHEN languageid = %(languageid)s THEN 10
+ WHEN languageid like %(short_languageid)s THEN 5
+ WHEN languageid like %(default_languageid)s THEN 2
ELSE 0
END
) desc limit 1
) d
) as valueto,
depth, collector, count(*) OVER() AS full_count
- FROM {recursive_table} order by row {limit_clause};
-
+ FROM %(recursive_table)s order by row {offset_clause};
"""
- subquery = (
- """, results as (
- SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
- FROM children c
- JOIN values ON(values.conceptid = c.conceptidto)
- WHERE LOWER(values.value) like '%%%s%%'
- AND values.valuetype in ('prefLabel')
- UNION
- SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
- FROM children c
- JOIN results r on (r.conceptidfrom=c.conceptidto)
- )"""
- % query.lower()
- if query is not None
- else ""
- )
+ if query:
+ subquery = """
+ , results as (
+ SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
+ FROM children c
+ JOIN values ON(values.conceptid = c.conceptidto)
+ WHERE LOWER(values.value) like %(query)s
+ AND values.valuetype in ('prefLabel')
+ UNION
+ SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
+ FROM children c
+ JOIN results r on (r.conceptidfrom=c.conceptidto)
+ )
+ """
+ else:
+ subquery = ""
+
+ sql = sql.format(subquery=subquery, offset_clause=offset_clause, depth_clause=depth_clause)
recursive_table = "results" if query else "children"
+ languageid = get_language() if languageid is None else languageid
- sql = sql.format(
- conceptid=conceptid,
- relationtypes=relationtypes,
- child_valuetypes=child_valuetypes,
- parent_valuetype=parent_valuetype,
- depth_limit=depth_limit,
- limit_clause=limit_clause,
- subquery=subquery,
- recursive_table=recursive_table,
- languageid=languageid,
- short_languageid=languageid.split("-")[0],
- default_languageid=settings.LANGUAGE_CODE,
+ cursor.execute(
+ sql,
+ {
+ "conceptid": conceptid,
+ "relationtypes": AsIs(relationtypes),
+ "depth_limit": depth_limit,
+ "limit": limit,
+ "offset": offset,
+ "query": "%" + query.lower() + "%",
+ "recursive_table": AsIs(recursive_table),
+ "languageid": languageid,
+ "short_languageid": languageid.split("-")[0] + "%",
+ "default_languageid": settings.LANGUAGE_CODE + "%",
+ },
)
-
else:
sql = """
WITH RECURSIVE
children AS (
SELECT r.conceptidfrom, r.conceptidto, r.relationtype, 1 AS depth
FROM relations r
- WHERE r.conceptidfrom = '{conceptid}'
- AND ({relationtypes})
+ WHERE r.conceptidfrom = %(conceptid)s
+ AND (%(relationtypes)s)
UNION
SELECT r.conceptidfrom, r.conceptidto, r.relationtype, depth+1
FROM relations r
JOIN children c ON(c.conceptidto = r.conceptidfrom)
- WHERE ({relationtypes})
- {depth_limit}
+ WHERE (%(relationtypes)s)
+ {depth_clause}
),
results AS (
SELECT
@@ -692,14 +696,15 @@ def get_child_edges(
JOIN children c ON(c.conceptidto = valueto.conceptid)
JOIN values valuefrom ON(c.conceptidfrom = valuefrom.conceptid)
JOIN d_value_types dtypesfrom ON(dtypesfrom.valuetype = valuefrom.valuetype)
- WHERE valueto.valuetype in ('{child_valuetypes}')
- AND valuefrom.valuetype in ('{child_valuetypes}')
+ WHERE valueto.valuetype in (%(child_valuetypes)s)
+ AND valuefrom.valuetype in (%(child_valuetypes)s)
)
- SELECT distinct {columns}
- FROM results {limit_clause}
-
+ SELECT distinct %(columns)s
+ FROM results {offset_clause}
"""
+ sql = sql.format(offset_clause=offset_clause, depth_clause=depth_clause)
+
if not columns:
columns = """
conceptidfrom::text, conceptidto::text,
@@ -710,19 +715,24 @@ def get_child_edges(
categoryfrom, categoryto
"""
- sql = sql.format(
- conceptid=conceptid,
- relationtypes=relationtypes,
- child_valuetypes=child_valuetypes,
- columns=columns,
- depth_limit=depth_limit,
- limit_clause=limit_clause,
+ cursor.execute(
+ sql,
+ {
+ "conceptid": conceptid,
+ "relationtypes": AsIs(relationtypes),
+ "child_valuetypes": ("','").join(
+ child_valuetypes
+ if child_valuetypes
+ else models.DValueType.objects.filter(category="label").values_list("valuetype", flat=True)
+ ),
+ "columns": AsIs(columns),
+ "depth_limit": depth_limit,
+ "limit": limit,
+ "offset": offset,
+ },
)
- cursor = connection.cursor()
- cursor.execute(sql)
- rows = cursor.fetchall()
- return rows
+ return cursor.fetchall()
def traverse(self, func, direction="down", scope=None, **kwargs):
"""
@@ -1176,30 +1186,31 @@ def get_e55_domain(self, conceptid):
"""
cursor = connection.cursor()
-
- sql = """
- WITH RECURSIVE children AS (
- SELECT d.conceptidfrom, d.conceptidto, c2.value, c2.valueid as valueid, c.value as valueto, c.valueid as valueidto, c.valuetype as vtype, 1 AS depth, array[d.conceptidto] AS conceptpath, array[c.valueid] AS idpath ---|NonRecursive Part
- FROM relations d
- JOIN values c ON(c.conceptid = d.conceptidto)
- JOIN values c2 ON(c2.conceptid = d.conceptidfrom)
- WHERE d.conceptidfrom = '{0}'
- and c2.valuetype = 'prefLabel'
- and c.valuetype in ('prefLabel', 'sortorder', 'collector')
- and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
- UNION
- SELECT d.conceptidfrom, d.conceptidto, v2.value, v2.valueid as valueid, v.value as valueto, v.valueid as valueidto, v.valuetype as vtype, depth+1, (conceptpath || d.conceptidto), (idpath || v.valueid) ---|RecursivePart
- FROM relations d
- JOIN children b ON(b.conceptidto = d.conceptidfrom)
- JOIN values v ON(v.conceptid = d.conceptidto)
- JOIN values v2 ON(v2.conceptid = d.conceptidfrom)
- WHERE v2.valuetype = 'prefLabel'
- and v.valuetype in ('prefLabel','sortorder', 'collector')
- and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
- ) SELECT conceptidfrom::text, conceptidto::text, value, valueid::text, valueto, valueidto::text, depth, idpath::text, conceptpath::text, vtype FROM children ORDER BY depth, conceptpath;
- """.format(
- conceptid
+ cursor.execute(
+ """
+ WITH RECURSIVE children AS (
+ SELECT d.conceptidfrom, d.conceptidto, c2.value, c2.valueid as valueid, c.value as valueto, c.valueid as valueidto, c.valuetype as vtype, 1 AS depth, array[d.conceptidto] AS conceptpath, array[c.valueid] AS idpath ---|NonRecursive Part
+ FROM relations d
+ JOIN values c ON(c.conceptid = d.conceptidto)
+ JOIN values c2 ON(c2.conceptid = d.conceptidfrom)
+ WHERE d.conceptidfrom = %s
+ and c2.valuetype = 'prefLabel'
+ and c.valuetype in ('prefLabel', 'sortorder', 'collector')
+ and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
+ UNION
+ SELECT d.conceptidfrom, d.conceptidto, v2.value, v2.valueid as valueid, v.value as valueto, v.valueid as valueidto, v.valuetype as vtype, depth+1, (conceptpath || d.conceptidto), (idpath || v.valueid) ---|RecursivePart
+ FROM relations d
+ JOIN children b ON(b.conceptidto = d.conceptidfrom)
+ JOIN values v ON(v.conceptid = d.conceptidto)
+ JOIN values v2 ON(v2.conceptid = d.conceptidfrom)
+ WHERE v2.valuetype = 'prefLabel'
+ and v.valuetype in ('prefLabel','sortorder', 'collector')
+ and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
+ ) SELECT conceptidfrom::text, conceptidto::text, value, valueid::text, valueto, valueidto::text, depth, idpath::text, conceptpath::text, vtype FROM children ORDER BY depth, conceptpath;
+ """,
+ [conceptid],
)
+ rows = cursor.fetchall()
column_names = [
"conceptidfrom",
@@ -1213,8 +1224,6 @@ def get_e55_domain(self, conceptid):
"conceptpath",
"vtype",
]
- cursor.execute(sql)
- rows = cursor.fetchall()
class Val(object):
def __init__(self, conceptid):
|
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import uuid
import copy
from operator import itemgetter
from operator import methodcaller
from django.db import transaction, connection
from django.db.models import Q
from arches.app.models import models
from arches.app.models.system_settings import settings
from arches.app.search.search_engine_factory import SearchEngineInstance as se
from arches.app.search.elasticsearch_dsl_builder import Term, Query, Bool, Match, Terms
from arches.app.search.mappings import CONCEPTS_INDEX
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from django.utils.translation import ugettext as _
from django.utils.translation import get_language
from django.db import IntegrityError
import logging
logger = logging.getLogger(__name__)
CORE_CONCEPTS = (
"00000000-0000-0000-0000-000000000001",
"00000000-0000-0000-0000-000000000004",
"00000000-0000-0000-0000-000000000005",
"00000000-0000-0000-0000-000000000006",
)
class Concept(object):
def __init__(self, *args, **kwargs):
self.id = ""
self.nodetype = ""
self.legacyoid = ""
self.relationshiptype = ""
self.values = []
self.subconcepts = []
self.parentconcepts = []
self.relatedconcepts = []
self.hassubconcepts = False
if len(args) != 0:
if isinstance(args[0], str):
try:
uuid.UUID(args[0])
self.get(args[0])
except (ValueError):
self.load(JSONDeserializer().deserialize(args[0]))
elif isinstance(args[0], dict):
self.load(args[0])
elif isinstance(args[0], object):
self.load(args[0])
def __unicode__(self):
return ("%s - %s") % (self.get_preflabel().value, self.id)
def __hash__(self):
return hash(self.id)
def __eq__(self, x):
return hash(self) == hash(x)
def __ne__(self, x):
return hash(self) != hash(x)
def load(self, value):
if isinstance(value, dict):
self.id = str(value["id"]) if "id" in value else ""
self.nodetype = value["nodetype"] if "nodetype" in value else ""
self.legacyoid = value["legacyoid"] if "legacyoid" in value else ""
self.relationshiptype = value["relationshiptype"] if "relationshiptype" in value else ""
if "values" in value:
for val in value["values"]:
self.addvalue(val)
if "subconcepts" in value:
for subconcept in value["subconcepts"]:
self.addsubconcept(subconcept)
if "parentconcepts" in value:
for parentconcept in value["parentconcepts"]:
self.addparent(parentconcept)
if "relatedconcepts" in value:
for relatedconcept in value["relatedconcepts"]:
self.addrelatedconcept(relatedconcept)
if isinstance(value, models.Concept):
self.id = str(value.pk)
self.nodetype = value.nodetype_id
self.legacyoid = value.legacyoid
def get(
self,
id="",
legacyoid="",
include_subconcepts=False,
include_parentconcepts=False,
include_relatedconcepts=False,
exclude=[],
include=[],
depth_limit=None,
up_depth_limit=None,
lang=settings.LANGUAGE_CODE,
semantic=True,
pathway_filter=None,
**kwargs,
):
if id != "":
self.load(models.Concept.objects.get(pk=id))
elif legacyoid != "":
self.load(models.Concept.objects.get(legacyoid=legacyoid))
_cache = kwargs.pop("_cache", {})
_cache[self.id] = self.__class__(
{"id": self.id, "nodetype": self.nodetype, "legacyoid": self.legacyoid, "relationshiptype": self.relationshiptype}
)
if semantic == True:
pathway_filter = (
pathway_filter
if pathway_filter
else Q(relationtype__category="Semantic Relations") | Q(relationtype__category="Properties")
)
else:
pathway_filter = pathway_filter if pathway_filter else Q(relationtype="member") | Q(relationtype="hasCollection")
if self.id != "":
nodetype = kwargs.pop("nodetype", self.nodetype)
uplevel = kwargs.pop("uplevel", 0)
downlevel = kwargs.pop("downlevel", 0)
depth_limit = depth_limit if depth_limit is None else int(depth_limit)
up_depth_limit = up_depth_limit if up_depth_limit is None else int(up_depth_limit)
if include is not None:
if len(include) > 0 and len(exclude) > 0:
raise Exception(_("Only include values for include or exclude, but not both"))
include = (
include if len(include) != 0 else models.DValueType.objects.distinct("category").values_list("category", flat=True)
)
include = set(include).difference(exclude)
exclude = []
if len(include) > 0:
values = models.Value.objects.filter(concept=self.id)
for value in values:
if value.valuetype.category in include:
self.values.append(ConceptValue(value))
hassubconcepts = models.Relation.objects.filter(Q(conceptfrom=self.id), pathway_filter, ~Q(relationtype="related"))[0:1]
if len(hassubconcepts) > 0:
self.hassubconcepts = True
if include_subconcepts:
conceptrealations = models.Relation.objects.filter(Q(conceptfrom=self.id), pathway_filter, ~Q(relationtype="related"))
if depth_limit is None or downlevel < depth_limit:
if depth_limit is not None:
downlevel = downlevel + 1
for relation in conceptrealations:
subconcept = (
_cache[str(relation.conceptto_id)]
if str(relation.conceptto_id) in _cache
else self.__class__().get(
id=relation.conceptto_id,
include_subconcepts=include_subconcepts,
include_parentconcepts=include_parentconcepts,
include_relatedconcepts=include_relatedconcepts,
exclude=exclude,
include=include,
depth_limit=depth_limit,
up_depth_limit=up_depth_limit,
downlevel=downlevel,
uplevel=uplevel,
nodetype=nodetype,
semantic=semantic,
pathway_filter=pathway_filter,
_cache=_cache.copy(),
lang=lang,
)
)
subconcept.relationshiptype = relation.relationtype_id
self.subconcepts.append(subconcept)
self.subconcepts = sorted(
self.subconcepts, key=lambda concept: self.natural_keys(concept.get_sortkey(lang)), reverse=False
)
# self.subconcepts = sorted(self.subconcepts, key=methodcaller(
# 'get_sortkey', lang=lang), reverse=False)
if include_parentconcepts:
conceptrealations = models.Relation.objects.filter(Q(conceptto=self.id), pathway_filter, ~Q(relationtype="related"))
if up_depth_limit is None or uplevel < up_depth_limit:
if up_depth_limit is not None:
uplevel = uplevel + 1
for relation in conceptrealations:
parentconcept = (
_cache[str(relation.conceptfrom_id)]
if str(relation.conceptfrom_id) in _cache
else self.__class__().get(
id=relation.conceptfrom_id,
include_subconcepts=False,
include_parentconcepts=include_parentconcepts,
include_relatedconcepts=include_relatedconcepts,
exclude=exclude,
include=include,
depth_limit=depth_limit,
up_depth_limit=up_depth_limit,
downlevel=downlevel,
uplevel=uplevel,
nodetype=nodetype,
semantic=semantic,
pathway_filter=pathway_filter,
_cache=_cache.copy(),
lang=lang,
)
)
parentconcept.relationshiptype = relation.relationtype_id
self.parentconcepts.append(parentconcept)
if include_relatedconcepts:
conceptrealations = models.Relation.objects.filter(
Q(relationtype="related") | Q(relationtype__category="Mapping Properties"),
Q(conceptto=self.id) | Q(conceptfrom=self.id),
)
relations = []
for relation in conceptrealations:
if str(relation.conceptto_id) != self.id and str(relation.relationid) not in relations:
relations.append(str(relation.relationid))
relatedconcept = self.__class__().get(relation.conceptto_id, include=["label"], lang=lang)
relatedconcept.relationshiptype = relation.relationtype_id
self.relatedconcepts.append(relatedconcept)
if str(relation.conceptfrom_id) != self.id and str(relation.relationid) not in relations:
relations.append(str(relation.relationid))
relatedconcept = self.__class__().get(relation.conceptfrom_id, include=["label"], lang=lang)
relatedconcept.relationshiptype = relation.relationtype_id
self.relatedconcepts.append(relatedconcept)
return self
def save(self):
self.id = self.id if (self.id != "" and self.id is not None) else str(uuid.uuid4())
concept, created = models.Concept.objects.get_or_create(
pk=self.id, defaults={"legacyoid": self.legacyoid if self.legacyoid != "" else self.id, "nodetype_id": self.nodetype}
)
for value in self.values:
if not isinstance(value, ConceptValue):
value = ConceptValue(value)
value.conceptid = self.id
value.save()
for parentconcept in self.parentconcepts:
parentconcept.save()
parentconcept.add_relation(self, parentconcept.relationshiptype)
for subconcept in self.subconcepts:
subconcept.save()
self.add_relation(subconcept, subconcept.relationshiptype)
# if we're moving a Concept Scheme below another Concept or Concept Scheme
if len(self.parentconcepts) > 0 and concept.nodetype_id == "ConceptScheme":
concept.nodetype_id = "Concept"
concept.save()
self.load(concept)
for relation in models.Relation.objects.filter(conceptfrom=concept, relationtype_id="hasTopConcept"):
relation.relationtype_id = "narrower"
relation.save()
for relatedconcept in self.relatedconcepts:
self.add_relation(relatedconcept, relatedconcept.relationshiptype)
if relatedconcept.relationshiptype == "member":
child_concepts = relatedconcept.get(include_subconcepts=True)
def applyRelationship(concept):
for subconcept in concept.subconcepts:
concept.add_relation(subconcept, relatedconcept.relationshiptype)
child_concepts.traverse(applyRelationship)
return concept
def delete(self, delete_self=False):
"""
Deletes any subconcepts associated with this concept, and additionally this concept itself if 'delete_self' is True.
If any parentconcepts or relatedconcepts are included, only the relationships to those concepts are deleted, not the concepts themselves.
If any values are passed, those values as well as the relationships to those values are deleted.
Note: Django automatically deletes any db models that have a foreign key relationship to the model being deleted
(e.g. deleting a concept model also deletes all of its values and relationships), but because we need to manage deleting
parent concepts, related concepts, and values ourselves, we have to do that here too.
"""
for subconcept in self.subconcepts:
concepts_to_delete = Concept.gather_concepts_to_delete(subconcept)
for key, concept in concepts_to_delete.items():
models.Concept.objects.get(pk=key).delete()
for parentconcept in self.parentconcepts:
relations_filter = (
(Q(relationtype__category="Semantic Relations") | Q(relationtype="hasTopConcept"))
& Q(conceptfrom=parentconcept.id)
& Q(conceptto=self.id)
)
conceptrelations = models.Relation.objects.filter(relations_filter)
for relation in conceptrelations:
relation.delete()
if models.Relation.objects.filter(relations_filter).count() == 0:
# we've removed all parent concepts so now this concept needs to be promoted to a Concept Scheme
concept = models.Concept.objects.get(pk=self.id)
concept.nodetype_id = "ConceptScheme"
concept.save()
self.load(concept)
for relation in models.Relation.objects.filter(conceptfrom=concept, relationtype_id="narrower"):
relation.relationtype_id = "hasTopConcept"
relation.save()
deletedrelatedconcepts = []
for relatedconcept in self.relatedconcepts:
conceptrelations = models.Relation.objects.filter(
Q(relationtype="related") | Q(relationtype="member") | Q(relationtype__category="Mapping Properties"),
conceptto=relatedconcept.id,
conceptfrom=self.id,
)
for relation in conceptrelations:
relation.delete()
deletedrelatedconcepts.append(relatedconcept)
conceptrelations = models.Relation.objects.filter(
Q(relationtype="related") | Q(relationtype="member") | Q(relationtype__category="Mapping Properties"),
conceptfrom=relatedconcept.id,
conceptto=self.id,
)
for relation in conceptrelations:
relation.delete()
deletedrelatedconcepts.append(relatedconcept)
for deletedrelatedconcept in deletedrelatedconcepts:
if deletedrelatedconcept in self.relatedconcepts:
self.relatedconcepts.remove(deletedrelatedconcept)
for value in self.values:
if not isinstance(value, ConceptValue):
value = ConceptValue(value)
value.delete()
if delete_self:
concepts_to_delete = Concept.gather_concepts_to_delete(self)
for key, concept in concepts_to_delete.items():
# delete only member relationships if the nodetype == Collection
if concept.nodetype == "Collection":
concept = Concept().get(
id=concept.id,
include_subconcepts=True,
include_parentconcepts=True,
include=["label"],
up_depth_limit=1,
semantic=False,
)
def find_concepts(concept):
if len(concept.parentconcepts) <= 1:
for subconcept in concept.subconcepts:
conceptrelation = models.Relation.objects.get(
conceptfrom=concept.id, conceptto=subconcept.id, relationtype="member"
)
conceptrelation.delete()
find_concepts(subconcept)
find_concepts(concept)
# if the concept is a collection, loop through the nodes and delete their rdmCollection values
for node in models.Node.objects.filter(config__rdmCollection=concept.id):
node.config["rdmCollection"] = None
node.save()
models.Concept.objects.get(pk=key).delete()
return
def add_relation(self, concepttorelate, relationtype):
"""
Relates this concept to 'concepttorelate' via the relationtype
"""
relation, created = models.Relation.objects.get_or_create(
conceptfrom_id=self.id, conceptto_id=concepttorelate.id, relationtype_id=relationtype
)
return relation
@staticmethod
def gather_concepts_to_delete(concept, lang=settings.LANGUAGE_CODE):
"""
Gets a dictionary of all the concept ids to delete
The values of the dictionary keys differ somewhat depending on the node type being deleted
If the nodetype == 'Concept' then return ConceptValue objects keyed to the concept id
If the nodetype == 'ConceptScheme' then return a ConceptValue object with the value set to any ONE prefLabel, keyed to the concept id
We do this because gathering all of the concept ids takes so long when deleting a Scheme or Group
"""
concepts_to_delete = {}
# Here we have to worry about making sure we don't delete nodes that have more than 1 parent
if concept.nodetype == "Concept":
concept = Concept().get(
id=concept.id, include_subconcepts=True, include_parentconcepts=True, include=["label"], up_depth_limit=1
)
def find_concepts(concept):
if len(concept.parentconcepts) <= 1:
concepts_to_delete[concept.id] = concept
for subconcept in concept.subconcepts:
find_concepts(subconcept)
find_concepts(concept)
return concepts_to_delete
# here we can just delete everything and so use a recursive CTE to get the concept ids much more quickly
if concept.nodetype == "ConceptScheme":
concepts_to_delete[concept.id] = concept
rows = Concept().get_child_concepts(concept.id)
for row in rows:
if row[0] not in concepts_to_delete:
concepts_to_delete[row[0]] = Concept({"id": row[0]})
concepts_to_delete[row[0]].addvalue({"id": row[2], "conceptid": row[0], "value": row[1]})
if concept.nodetype == "Collection":
concepts_to_delete[concept.id] = concept
rows = Concept().get_child_collections(concept.id)
for row in rows:
if row[0] not in concepts_to_delete:
concepts_to_delete[row[0]] = Concept({"id": row[0]})
concepts_to_delete[row[0]].addvalue({"id": row[2], "conceptid": row[0], "value": row[1]})
return concepts_to_delete
def get_child_collections_hierarchically(self, conceptid, child_valuetypes=None, offset=0, limit=50, query=None):
child_valuetypes = child_valuetypes if child_valuetypes else ["prefLabel"]
columns = "valueidto::text, conceptidto::text, valueto, valuetypeto, depth, count(*) OVER() AS full_count, collector"
return self.get_child_edges(
conceptid, ["member"], child_valuetypes, offset=offset, limit=limit, order_hierarchically=True, query=query, columns=columns
)
def get_child_collections(self, conceptid, child_valuetypes=None, parent_valuetype="prefLabel", columns=None, depth_limit=""):
child_valuetypes = child_valuetypes if child_valuetypes else ["prefLabel"]
columns = columns if columns else "conceptidto::text, valueto, valueidto::text"
return self.get_child_edges(conceptid, ["member"], child_valuetypes, parent_valuetype, columns, depth_limit)
def get_child_concepts(self, conceptid, child_valuetypes=None, parent_valuetype="prefLabel", columns=None, depth_limit=""):
columns = columns if columns else "conceptidto::text, valueto, valueidto::text"
return self.get_child_edges(conceptid, ["narrower", "hasTopConcept"], child_valuetypes, parent_valuetype, columns, depth_limit)
def get_child_concepts_for_indexing(self, conceptid, child_valuetypes=None, parent_valuetype="prefLabel", depth_limit=""):
columns = "valueidto::text, conceptidto::text, valuetypeto, categoryto, valueto, languageto"
data = self.get_child_edges(conceptid, ["narrower", "hasTopConcept"], child_valuetypes, parent_valuetype, columns, depth_limit)
return [dict(list(zip(["id", "conceptid", "type", "category", "value", "language"], d)), top_concept="") for d in data]
def get_child_edges(
self,
conceptid,
relationtypes,
child_valuetypes=None,
parent_valuetype="prefLabel",
columns=None,
depth_limit=None,
offset=None,
limit=20,
order_hierarchically=False,
query=None,
languageid=None,
):
"""
Recursively builds a list of concept relations for a given concept and all of its subconcepts, filtered by the given relation types and value types.
"""
# if the conceptid isn't a UUID then Postgres will throw an error and transactions will be aborted #7822
try:
uuid.UUID(conceptid)
except:
return []
languageid = get_language() if languageid is None else languageid
relationtypes = " or ".join(["r.relationtype = '%s'" % (relationtype) for relationtype in relationtypes])
depth_limit = "and depth < %s" % depth_limit if depth_limit else ""
child_valuetypes = ("','").join(
child_valuetypes if child_valuetypes else models.DValueType.objects.filter(category="label").values_list("valuetype", flat=True)
)
limit_clause = " limit %s offset %s" % (limit, offset) if offset is not None else ""
if order_hierarchically:
sql = """
WITH RECURSIVE
ordered_relationships AS (
(
SELECT r.conceptidfrom, r.conceptidto, r.relationtype, (
SELECT value
FROM values
WHERE conceptid=r.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
CASE WHEN languageid = '{languageid}' THEN 10
WHEN languageid like '{short_languageid}%' THEN 5
WHEN languageid like '{default_languageid}%' THEN 2
ELSE 0
END
) desc limit 1
) as valuesto,
(
SELECT value::int
FROM values
WHERE conceptid=r.conceptidto
AND valuetype in ('sortorder')
limit 1
) as sortorder,
(
SELECT value
FROM values
WHERE conceptid=r.conceptidto
AND valuetype in ('collector')
limit 1
) as collector
FROM relations r
WHERE r.conceptidfrom = '{conceptid}'
and ({relationtypes})
ORDER BY sortorder, valuesto
)
UNION
(
SELECT r.conceptidfrom, r.conceptidto, r.relationtype,(
SELECT value
FROM values
WHERE conceptid=r.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
CASE WHEN languageid = '{languageid}' THEN 10
WHEN languageid like '{short_languageid}%' THEN 5
WHEN languageid like '{default_languageid}%' THEN 2
ELSE 0
END
) desc limit 1
) as valuesto,
(
SELECT value::int
FROM values
WHERE conceptid=r.conceptidto
AND valuetype in ('sortorder')
limit 1
) as sortorder,
(
SELECT value
FROM values
WHERE conceptid=r.conceptidto
AND valuetype in ('collector')
limit 1
) as collector
FROM relations r
JOIN ordered_relationships b ON(b.conceptidto = r.conceptidfrom)
WHERE ({relationtypes})
ORDER BY sortorder, valuesto
)
),
children AS (
SELECT r.conceptidfrom, r.conceptidto,
to_char(row_number() OVER (), 'fm000000') as row,
r.collector,
1 AS depth ---|NonRecursive Part
FROM ordered_relationships r
WHERE r.conceptidfrom = '{conceptid}'
and ({relationtypes})
UNION
SELECT r.conceptidfrom, r.conceptidto,
row || '-' || to_char(row_number() OVER (), 'fm000000'),
r.collector,
depth+1 ---|RecursivePart
FROM ordered_relationships r
JOIN children b ON(b.conceptidto = r.conceptidfrom)
WHERE ({relationtypes})
{depth_limit}
)
{subquery}
SELECT
(
select row_to_json(d)
FROM (
SELECT *
FROM values
WHERE conceptid={recursive_table}.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
CASE WHEN languageid = '{languageid}' THEN 10
WHEN languageid like '{short_languageid}%' THEN 5
WHEN languageid like '{default_languageid}%' THEN 2
ELSE 0
END
) desc limit 1
) d
) as valueto,
depth, collector, count(*) OVER() AS full_count
FROM {recursive_table} order by row {limit_clause};
"""
subquery = (
""", results as (
SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
FROM children c
JOIN values ON(values.conceptid = c.conceptidto)
WHERE LOWER(values.value) like '%%%s%%'
AND values.valuetype in ('prefLabel')
UNION
SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
FROM children c
JOIN results r on (r.conceptidfrom=c.conceptidto)
)"""
% query.lower()
if query is not None
else ""
)
recursive_table = "results" if query else "children"
sql = sql.format(
conceptid=conceptid,
relationtypes=relationtypes,
child_valuetypes=child_valuetypes,
parent_valuetype=parent_valuetype,
depth_limit=depth_limit,
limit_clause=limit_clause,
subquery=subquery,
recursive_table=recursive_table,
languageid=languageid,
short_languageid=languageid.split("-")[0],
default_languageid=settings.LANGUAGE_CODE,
)
else:
sql = """
WITH RECURSIVE
children AS (
SELECT r.conceptidfrom, r.conceptidto, r.relationtype, 1 AS depth
FROM relations r
WHERE r.conceptidfrom = '{conceptid}'
AND ({relationtypes})
UNION
SELECT r.conceptidfrom, r.conceptidto, r.relationtype, depth+1
FROM relations r
JOIN children c ON(c.conceptidto = r.conceptidfrom)
WHERE ({relationtypes})
{depth_limit}
),
results AS (
SELECT
valuefrom.value as valuefrom, valueto.value as valueto,
valuefrom.valueid as valueidfrom, valueto.valueid as valueidto,
valuefrom.valuetype as valuetypefrom, valueto.valuetype as valuetypeto,
valuefrom.languageid as languagefrom, valueto.languageid as languageto,
dtypesfrom.category as categoryfrom, dtypesto.category as categoryto,
c.conceptidfrom, c.conceptidto
FROM values valueto
JOIN d_value_types dtypesto ON(dtypesto.valuetype = valueto.valuetype)
JOIN children c ON(c.conceptidto = valueto.conceptid)
JOIN values valuefrom ON(c.conceptidfrom = valuefrom.conceptid)
JOIN d_value_types dtypesfrom ON(dtypesfrom.valuetype = valuefrom.valuetype)
WHERE valueto.valuetype in ('{child_valuetypes}')
AND valuefrom.valuetype in ('{child_valuetypes}')
)
SELECT distinct {columns}
FROM results {limit_clause}
"""
if not columns:
columns = """
conceptidfrom::text, conceptidto::text,
valuefrom, valueto,
valueidfrom::text, valueidto::text,
valuetypefrom, valuetypeto,
languagefrom, languageto,
categoryfrom, categoryto
"""
sql = sql.format(
conceptid=conceptid,
relationtypes=relationtypes,
child_valuetypes=child_valuetypes,
columns=columns,
depth_limit=depth_limit,
limit_clause=limit_clause,
)
cursor = connection.cursor()
cursor.execute(sql)
rows = cursor.fetchall()
return rows
def traverse(self, func, direction="down", scope=None, **kwargs):
"""
Traverses a concept graph from self to leaf (direction='down') or root (direction='up'), calling
the given function on each node and passing an optional scope to each call
Return a value from the function to prematurely end the traversal
"""
_cache = kwargs.pop("_cache", [])
if self.id not in _cache:
_cache.append(self.id)
if scope is None:
ret = func(self, **kwargs)
else:
ret = func(self, scope, **kwargs)
# break out of the traversal if the function returns a value
if ret is not None:
return ret
if direction == "down":
for subconcept in self.subconcepts:
ret = subconcept.traverse(func, direction, scope, _cache=_cache, **kwargs)
if ret is not None:
return ret
else:
for parentconcept in self.parentconcepts:
ret = parentconcept.traverse(func, direction, scope, _cache=_cache, **kwargs)
if ret is not None:
return ret
def get_sortkey(self, lang=settings.LANGUAGE_CODE):
for value in self.values:
if value.type == "sortorder":
try:
return float(value.value)
except:
return None
return self.get_preflabel(lang=lang).value
def natural_keys(self, text):
"""
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
float regex comes from https://stackoverflow.com/a/12643073/190597
"""
def atof(text):
try:
retval = float(text)
except ValueError:
retval = text
return retval
return [atof(c) for c in re.split(r"[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)", str(text))]
def get_preflabel(self, lang=settings.LANGUAGE_CODE):
score = 0
ranked_labels = []
if self.values == []:
concept = Concept().get(id=self.id, include_subconcepts=False, include_parentconcepts=False, include=["label"])
else:
concept = self
for value in concept.values:
ranked_label = {"weight": 1, "value": value}
if value.type == "prefLabel":
ranked_label["weight"] = ranked_label["weight"] * 10
elif value.type == "altLabel":
ranked_label["weight"] = ranked_label["weight"] * 4
if value.language == lang:
ranked_label["weight"] = ranked_label["weight"] * 10
elif value.language.split("-")[0] == lang.split("-")[0]:
ranked_label["weight"] = ranked_label["weight"] * 5
ranked_labels.append(ranked_label)
ranked_labels = sorted(ranked_labels, key=lambda label: label["weight"], reverse=True)
if len(ranked_labels) == 0:
ranked_labels.append({"weight": 1, "value": ConceptValue()})
return ranked_labels[0]["value"]
def flatten(self, ret=None):
"""
Flattens the graph into an unordered list of concepts
"""
if ret is None:
ret = []
ret.append(self)
for subconcept in self.subconcepts:
subconcept.flatten(ret)
return ret
def addparent(self, value):
if isinstance(value, dict):
self.parentconcepts.append(Concept(value))
elif isinstance(value, Concept):
self.parentconcepts.append(value)
else:
raise Exception("Invalid parent concept definition: %s" % (value))
def addsubconcept(self, value):
if isinstance(value, dict):
self.subconcepts.append(Concept(value))
elif isinstance(value, Concept):
self.subconcepts.append(value)
else:
raise Exception(_("Invalid subconcept definition: %s") % (value))
def addrelatedconcept(self, value):
if isinstance(value, dict):
self.relatedconcepts.append(Concept(value))
elif isinstance(value, Concept):
self.relatedconcepts.append(value)
else:
raise Exception(_("Invalid related concept definition: %s") % (value))
def addvalue(self, value):
if isinstance(value, dict):
value["conceptid"] = self.id
self.values.append(ConceptValue(value))
elif isinstance(value, ConceptValue):
self.values.append(value)
elif isinstance(value, models.Value):
self.values.append(ConceptValue(value))
else:
raise Exception(_("Invalid value definition: %s") % (value))
def index(self, scheme=None):
if scheme is None:
scheme = self.get_context()
for value in self.values:
value.index(scheme=scheme)
if self.nodetype == "ConceptScheme":
scheme = None
for subconcept in self.subconcepts:
subconcept.index(scheme=scheme)
def bulk_index(self):
concept_docs = []
if self.nodetype == "ConceptScheme":
concept = Concept().get(id=self.id, values=["label"])
concept.index()
for topConcept in self.get_child_concepts_for_indexing(self.id, depth_limit=1):
concept = Concept().get(id=topConcept["conceptid"])
scheme = concept.get_context()
topConcept["top_concept"] = scheme.id
concept_docs.append(se.create_bulk_item(index=CONCEPTS_INDEX, id=topConcept["id"], data=topConcept))
for childConcept in concept.get_child_concepts_for_indexing(topConcept["conceptid"]):
childConcept["top_concept"] = scheme.id
concept_docs.append(se.create_bulk_item(index=CONCEPTS_INDEX, id=childConcept["id"], data=childConcept))
if self.nodetype == "Concept":
concept = Concept().get(id=self.id, values=["label"])
scheme = concept.get_context()
concept.index(scheme)
for childConcept in concept.get_child_concepts_for_indexing(self.id):
childConcept["top_concept"] = scheme.id
concept_docs.append(se.create_bulk_item(index=CONCEPTS_INDEX, id=childConcept["id"], data=childConcept))
se.bulk_index(concept_docs)
def delete_index(self, delete_self=False):
def delete_concept_values_index(concepts_to_delete):
for concept in concepts_to_delete.values():
query = Query(se, start=0, limit=10000)
term = Term(field="conceptid", term=concept.id)
query.add_query(term)
query.delete(index=CONCEPTS_INDEX)
if delete_self:
concepts_to_delete = Concept.gather_concepts_to_delete(self)
delete_concept_values_index(concepts_to_delete)
else:
for subconcept in self.subconcepts:
concepts_to_delete = Concept.gather_concepts_to_delete(subconcept)
delete_concept_values_index(concepts_to_delete)
def concept_tree(
self, top_concept="00000000-0000-0000-0000-000000000001", lang=settings.LANGUAGE_CODE, mode="semantic",
):
class concept(object):
def __init__(self, *args, **kwargs):
self.label = ""
self.labelid = ""
self.id = ""
self.sortorder = None
self.load_on_demand = False
self.children = []
def _findNarrowerConcept(conceptid, depth_limit=None, level=0):
labels = models.Value.objects.filter(concept=conceptid)
ret = concept()
temp = Concept()
for label in labels:
temp.addvalue(label)
if label.valuetype_id == "sortorder":
try:
ret.sortorder = float(label.value)
except:
ret.sortorder = None
label = temp.get_preflabel(lang=lang)
ret.label = label.value
ret.id = label.conceptid
ret.labelid = label.id
if mode == "semantic":
conceptrealations = models.Relation.objects.filter(
Q(conceptfrom=conceptid), Q(relationtype__category="Semantic Relations") | Q(relationtype__category="Properties")
)
if mode == "collections":
conceptrealations = models.Relation.objects.filter(
Q(conceptfrom=conceptid), Q(relationtype="member") | Q(relationtype="hasCollection")
)
if depth_limit is not None and len(conceptrealations) > 0 and level >= depth_limit:
ret.load_on_demand = True
else:
if depth_limit is not None:
level = level + 1
for relation in conceptrealations:
ret.children.append(_findNarrowerConcept(relation.conceptto_id, depth_limit=depth_limit, level=level))
ret.children = sorted(
ret.children,
key=lambda concept: self.natural_keys(concept.sortorder if concept.sortorder else concept.label),
reverse=False,
)
return ret
def _findBroaderConcept(conceptid, child_concept, depth_limit=None, level=0):
conceptrealations = models.Relation.objects.filter(
Q(conceptto=conceptid), ~Q(relationtype="related"), ~Q(relationtype__category="Mapping Properties")
)
if len(conceptrealations) > 0 and conceptid != top_concept:
labels = models.Value.objects.filter(concept=conceptrealations[0].conceptfrom_id)
ret = concept()
temp = Concept()
for label in labels:
temp.addvalue(label)
label = temp.get_preflabel(lang=lang)
ret.label = label.value
ret.id = label.conceptid
ret.labelid = label.id
ret.children.append(child_concept)
return _findBroaderConcept(conceptrealations[0].conceptfrom_id, ret, depth_limit=depth_limit, level=level)
else:
return child_concept
graph = []
if self.id is None or self.id == "" or self.id == "None" or self.id == top_concept:
if mode == "semantic":
concepts = models.Concept.objects.filter(nodetype="ConceptScheme")
for conceptmodel in concepts:
graph.append(_findNarrowerConcept(conceptmodel.pk, depth_limit=1))
if mode == "collections":
concepts = models.Concept.objects.filter(nodetype="Collection")
for conceptmodel in concepts:
graph.append(_findNarrowerConcept(conceptmodel.pk, depth_limit=0))
graph = sorted(graph, key=lambda concept: concept.label)
# graph = _findNarrowerConcept(concepts[0].pk, depth_limit=1).children
else:
graph = _findNarrowerConcept(self.id, depth_limit=1).children
# concepts = _findNarrowerConcept(self.id, depth_limit=1)
# graph = [_findBroaderConcept(self.id, concepts, depth_limit=1)]
return graph
def get_paths(self, lang=settings.LANGUAGE_CODE):
def graph_to_paths(current_concept, path=[], path_list=[], _cache=[]):
if len(path) == 0:
current_path = []
else:
current_path = path[:]
current_path.insert(
0,
{
"label": current_concept.get_preflabel(lang=lang).value,
"relationshiptype": current_concept.relationshiptype,
"id": current_concept.id,
},
)
if len(current_concept.parentconcepts) == 0 or current_concept.id in _cache:
path_list.append(current_path[:])
else:
_cache.append(current_concept.id)
for parent in current_concept.parentconcepts:
ret = graph_to_paths(parent, current_path, path_list, _cache)
return path_list
# def graph_to_paths(current_concept, **kwargs):
# path = kwargs.get('path', [])
# path_list = kwargs.get('path_list', [])
# if len(path) == 0:
# current_path = []
# else:
# current_path = path[:]
# current_path.insert(0, {'label': current_concept.get_preflabel(lang=lang).value, 'relationshiptype': current_concept.relationshiptype, 'id': current_concept.id})
# if len(current_concept.parentconcepts) == 0:
# path_list.append(current_path[:])
# # else:
# # for parent in current_concept.parentconcepts:
# # ret = graph_to_paths(parent, current_path, path_list, _cache)
# #return path_list
# self.traverse(graph_to_paths, direction='up')
return graph_to_paths(self)
def get_node_and_links(self, lang=settings.LANGUAGE_CODE):
nodes = [{"concept_id": self.id, "name": self.get_preflabel(lang=lang).value, "type": "Current"}]
links = []
def get_parent_nodes_and_links(current_concept, _cache=[]):
if current_concept.id not in _cache:
_cache.append(current_concept.id)
parents = current_concept.parentconcepts
for parent in parents:
nodes.append(
{
"concept_id": parent.id,
"name": parent.get_preflabel(lang=lang).value,
"type": "Root" if len(parent.parentconcepts) == 0 else "Ancestor",
}
)
links.append(
{"target": current_concept.id, "source": parent.id, "relationship": "broader", }
)
get_parent_nodes_and_links(parent, _cache)
get_parent_nodes_and_links(self)
# def get_parent_nodes_and_links(current_concept):
# parents = current_concept.parentconcepts
# for parent in parents:
# nodes.append({'concept_id': parent.id, 'name': parent.get_preflabel(lang=lang).value, 'type': 'Root' if len(parent.parentconcepts) == 0 else 'Ancestor'})
# links.append({'target': current_concept.id, 'source': parent.id, 'relationship': 'broader' })
# self.traverse(get_parent_nodes_and_links, direction='up')
for child in self.subconcepts:
nodes.append(
{"concept_id": child.id, "name": child.get_preflabel(lang=lang).value, "type": "Descendant", }
)
links.append({"source": self.id, "target": child.id, "relationship": "narrower"})
for related in self.relatedconcepts:
nodes.append(
{"concept_id": related.id, "name": related.get_preflabel(lang=lang).value, "type": "Related", }
)
links.append({"source": self.id, "target": related.id, "relationship": "related"})
# get unique node list and assign unique integer ids for each node (required by d3)
nodes = list({node["concept_id"]: node for node in nodes}.values())
for i in range(len(nodes)):
nodes[i]["id"] = i
for link in links:
link["source"] = i if link["source"] == nodes[i]["concept_id"] else link["source"]
link["target"] = i if link["target"] == nodes[i]["concept_id"] else link["target"]
return {"nodes": nodes, "links": links}
def get_context(self):
"""
get the Top Concept that the Concept participates in
"""
if self.nodetype == "Concept" or self.nodetype == "Collection":
concept = Concept().get(id=self.id, include_parentconcepts=True, include=None)
def get_scheme_id(concept):
for parentconcept in concept.parentconcepts:
if parentconcept.relationshiptype == "hasTopConcept":
return concept
if len(concept.parentconcepts) > 0:
return concept.traverse(get_scheme_id, direction="up")
else:
return self
else: # like ConceptScheme or EntityType
return self
def get_scheme(self):
"""
get the ConceptScheme that the Concept participates in
"""
topConcept = self.get_context()
if len(topConcept.parentconcepts) == 1:
if topConcept.parentconcepts[0].nodetype == "ConceptScheme":
return topConcept.parentconcepts[0]
return None
def check_if_concept_in_use(self):
"""Checks if a concept or any of its subconcepts is in use by a resource instance"""
in_use = False
cursor = connection.cursor()
for value in self.values:
sql = (
"""
SELECT count(*) from tiles t, jsonb_each_text(t.tiledata) as json_data
WHERE json_data.value = '%s'
"""
% value.id
)
cursor.execute(sql)
rows = cursor.fetchall()
if rows[0][0] > 0:
in_use = True
break
if in_use is not True:
for subconcept in self.subconcepts:
in_use = subconcept.check_if_concept_in_use()
if in_use == True:
return in_use
return in_use
def get_e55_domain(self, conceptid):
"""
For a given entitytypeid, creates a dictionary representing that entitytypeid's concept graph (member pathway), formatted to support
select2 dropdowns
"""
cursor = connection.cursor()
sql = """
WITH RECURSIVE children AS (
SELECT d.conceptidfrom, d.conceptidto, c2.value, c2.valueid as valueid, c.value as valueto, c.valueid as valueidto, c.valuetype as vtype, 1 AS depth, array[d.conceptidto] AS conceptpath, array[c.valueid] AS idpath ---|NonRecursive Part
FROM relations d
JOIN values c ON(c.conceptid = d.conceptidto)
JOIN values c2 ON(c2.conceptid = d.conceptidfrom)
WHERE d.conceptidfrom = '{0}'
and c2.valuetype = 'prefLabel'
and c.valuetype in ('prefLabel', 'sortorder', 'collector')
and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
UNION
SELECT d.conceptidfrom, d.conceptidto, v2.value, v2.valueid as valueid, v.value as valueto, v.valueid as valueidto, v.valuetype as vtype, depth+1, (conceptpath || d.conceptidto), (idpath || v.valueid) ---|RecursivePart
FROM relations d
JOIN children b ON(b.conceptidto = d.conceptidfrom)
JOIN values v ON(v.conceptid = d.conceptidto)
JOIN values v2 ON(v2.conceptid = d.conceptidfrom)
WHERE v2.valuetype = 'prefLabel'
and v.valuetype in ('prefLabel','sortorder', 'collector')
and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
) SELECT conceptidfrom::text, conceptidto::text, value, valueid::text, valueto, valueidto::text, depth, idpath::text, conceptpath::text, vtype FROM children ORDER BY depth, conceptpath;
""".format(
conceptid
)
column_names = [
"conceptidfrom",
"conceptidto",
"value",
"valueid",
"valueto",
"valueidto",
"depth",
"idpath",
"conceptpath",
"vtype",
]
cursor.execute(sql)
rows = cursor.fetchall()
class Val(object):
def __init__(self, conceptid):
self.text = ""
self.conceptid = conceptid
self.id = ""
self.sortorder = ""
self.collector = ""
self.children = []
result = Val(conceptid)
def _findNarrower(val, path, rec):
for conceptid in path:
childids = [child.conceptid for child in val.children]
if conceptid not in childids:
new_val = Val(rec["conceptidto"])
if rec["vtype"] == "sortorder":
new_val.sortorder = rec["valueto"]
elif rec["vtype"] == "prefLabel":
new_val.text = rec["valueto"]
new_val.id = rec["valueidto"]
elif rec["vtype"] == "collector":
new_val.collector = "collector"
val.children.append(new_val)
else:
for child in val.children:
if conceptid == child.conceptid:
if conceptid == path[-1]:
if rec["vtype"] == "sortorder":
child.sortorder = rec["valueto"]
elif rec["vtype"] == "prefLabel":
child.text = rec["valueto"]
child.id = rec["valueidto"]
elif rec["vtype"] == "collector":
child.collector = "collector"
path.pop(0)
_findNarrower(child, path, rec)
val.children.sort(key=lambda x: (x.sortorder, x.text))
for row in rows:
rec = dict(list(zip(column_names, row)))
path = rec["conceptpath"][1:-1].split(",")
_findNarrower(result, path, rec)
return JSONSerializer().serializeToPython(result)["children"]
def make_collection(self):
if len(self.values) == 0:
raise Exception(_("Need to include values when creating a collection"))
values = JSONSerializer().serializeToPython(self.values)
for value in values:
value["id"] = ""
collection_concept = Concept({"nodetype": "Collection", "values": values})
def create_collection(conceptfrom):
for relation in models.Relation.objects.filter(
Q(conceptfrom_id=conceptfrom.id),
Q(relationtype__category="Semantic Relations") | Q(relationtype__category="Properties"),
~Q(relationtype="related"),
):
conceptto = Concept(relation.conceptto)
if conceptfrom == self:
collection_concept.add_relation(conceptto, "member")
else:
conceptfrom.add_relation(conceptto, "member")
create_collection(conceptto)
with transaction.atomic():
collection_concept.save()
create_collection(self)
return collection_concept
class ConceptValue(object):
def __init__(self, *args, **kwargs):
self.id = ""
self.conceptid = ""
self.type = ""
self.category = ""
self.value = ""
self.language = ""
if len(args) != 0:
if isinstance(args[0], str):
try:
uuid.UUID(args[0])
self.get(args[0])
except (ValueError):
self.load(JSONDeserializer().deserialize(args[0]))
elif isinstance(args[0], object):
self.load(args[0])
def __repr__(self):
return ('%s: %s = "%s" in lang %s') % (self.__class__, self.type, self.value, self.language)
def get(self, id=""):
self.load(models.Value.objects.get(pk=id))
return self
def save(self):
if self.value.strip() != "":
self.id = self.id if (self.id != "" and self.id is not None) else str(uuid.uuid4())
value = models.Value()
value.pk = self.id
value.value = self.value
value.concept_id = self.conceptid # models.Concept.objects.get(pk=self.conceptid)
value.valuetype_id = self.type # models.DValueType.objects.get(pk=self.type)
if self.language != "":
# need to normalize language ids to the form xx-XX
lang_parts = self.language.lower().replace("_", "-").split("-")
try:
lang_parts[1] = lang_parts[1].upper()
except:
pass
self.language = "-".join(lang_parts)
value.language_id = self.language # models.DLanguage.objects.get(pk=self.language)
else:
value.language_id = settings.LANGUAGE_CODE
value.save()
self.category = value.valuetype.category
def delete(self):
if self.id != "":
newvalue = models.Value.objects.get(pk=self.id)
if newvalue.valuetype.valuetype == "image":
newvalue = models.FileValue.objects.get(pk=self.id)
newvalue.delete()
self = ConceptValue()
return self
def load(self, value):
if isinstance(value, models.Value):
self.id = str(value.pk)
self.conceptid = str(value.concept_id)
self.type = value.valuetype_id
self.category = value.valuetype.category
self.value = value.value
self.language = value.language_id
if isinstance(value, dict):
self.id = str(value["id"]) if "id" in value else ""
self.conceptid = str(value["conceptid"]) if "conceptid" in value else ""
self.type = value["type"] if "type" in value else ""
self.category = value["category"] if "category" in value else ""
self.value = value["value"] if "value" in value else ""
self.language = value["language"] if "language" in value else ""
def index(self, scheme=None):
if self.category == "label":
data = JSONSerializer().serializeToPython(self)
if scheme is None:
scheme = self.get_scheme_id()
if scheme is None:
raise Exception(_("Index of label failed. Index type (scheme id) could not be derived from the label."))
data["top_concept"] = scheme.id
se.index_data(index=CONCEPTS_INDEX, body=data, idfield="id")
def delete_index(self):
query = Query(se, start=0, limit=10000)
term = Term(field="id", term=self.id)
query.add_query(term)
query.delete(index=CONCEPTS_INDEX)
def get_scheme_id(self):
result = se.search(index=CONCEPTS_INDEX, id=self.id)
if result["found"]:
return Concept(result["top_concept"])
else:
return None
def get_preflabel_from_conceptid(conceptid, lang):
ret = None
default = {
"category": "",
"conceptid": "",
"language": "",
"value": "",
"type": "",
"id": "",
}
query = Query(se)
bool_query = Bool()
bool_query.must(Match(field="type", query="prefLabel", type="phrase"))
bool_query.filter(Terms(field="conceptid", terms=[conceptid]))
query.add_query(bool_query)
preflabels = query.search(index=CONCEPTS_INDEX)["hits"]["hits"]
for preflabel in preflabels:
default = preflabel["_source"]
if preflabel["_source"]["language"] is not None and lang is not None:
# get the label in the preferred language, otherwise get the label in the default language
if preflabel["_source"]["language"] == lang:
return preflabel["_source"]
if preflabel["_source"]["language"].split("-")[0] == lang.split("-")[0]:
ret = preflabel["_source"]
if preflabel["_source"]["language"] == settings.LANGUAGE_CODE and ret is None:
ret = preflabel["_source"]
return default if ret is None else ret
def get_valueids_from_concept_label(label, conceptid=None, lang=None):
def exact_val_match(val, conceptid=None):
# exact term match, don't care about relevance ordering.
# due to language formatting issues, and with (hopefully) small result sets,
# it is easier to have the filter logic in Python than to craft it in the DSL
if conceptid is None:
return {"query": {"bool": {"filter": {"match_phrase": {"value": val}}}}}
else:
return {
"query": {
"bool": {"filter": [{"match_phrase": {"value": val}}, {"term": {"conceptid": conceptid}}, ]}
}
}
concept_label_results = se.search(index=CONCEPTS_INDEX, body=exact_val_match(label, conceptid))
if concept_label_results is None:
print("Found no matches for label:'{0}' and concept_id: '{1}'".format(label, conceptid))
return
return [
res["_source"]
for res in concept_label_results["hits"]["hits"]
if lang is None or res["_source"]["language"].lower() == lang.lower()
]
def get_preflabel_from_valueid(valueid, lang):
concept_label = se.search(index=CONCEPTS_INDEX, id=valueid)
if concept_label["found"]:
return get_preflabel_from_conceptid(concept_label["_source"]["conceptid"], lang)
|
GHSA-gmpq-xrxj-xh8m
|
arches/app/views/concept.py
|
@@ -380,8 +380,7 @@ def dropdown(request):
def paged_dropdown(request):
conceptid = request.GET.get("conceptid")
- query = request.GET.get("query", None)
- query = None if query == "" else query
+ query = request.GET.get("query", "")
page = int(request.GET.get("page", 1))
limit = 50
offset = (page - 1) * limit
@@ -405,25 +404,25 @@ def paged_dropdown(request):
found = True
break
if not found:
- sql = """
- SELECT value, valueid
- FROM
- (
- SELECT *, CASE WHEN LOWER(languageid) = '{languageid}' THEN 10
- WHEN LOWER(languageid) like '{short_languageid}%' THEN 5
- ELSE 0
- END score
- FROM values
- ) as vals
- WHERE LOWER(value)='{query}' AND score > 0
- AND valuetype in ('prefLabel')
- ORDER BY score desc limit 1
- """
-
languageid = get_language().lower()
- sql = sql.format(query=query.lower(), languageid=languageid, short_languageid=languageid.split("-")[0])
cursor = connection.cursor()
- cursor.execute(sql)
+ cursor.execute(
+ """
+ SELECT value, valueid
+ FROM
+ (
+ SELECT *, CASE WHEN LOWER(languageid) = %(languageid)s THEN 10
+ WHEN LOWER(languageid) like %(short_languageid)s THEN 5
+ ELSE 0
+ END score
+ FROM values
+ ) as vals
+ WHERE LOWER(value)=%(query)s AND score > 0
+ AND valuetype in ('prefLabel')
+ ORDER BY score desc limit 1
+ """,
+ {"languageid": languageid, "short_languageid": languageid.split("-")[0] + "%", "query": query.lower()},
+ )
rows = cursor.fetchall()
if len(rows) == 1:
|
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import uuid
from django.db import transaction, connection
from django.db.models import Q
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseNotAllowed, HttpResponseServerError
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
from django.utils.translation import get_language
from arches.app.models import models
from arches.app.models.system_settings import settings
from arches.app.models.concept import Concept, ConceptValue, CORE_CONCEPTS, get_preflabel_from_valueid
from arches.app.search.search_engine_factory import SearchEngineInstance as se
from arches.app.search.elasticsearch_dsl_builder import Bool, Match, Query, Nested, Terms, GeoShape, Range, SimpleQueryString
from arches.app.search.mappings import CONCEPTS_INDEX
from arches.app.utils.decorators import group_required
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from arches.app.utils.response import JSONResponse, JSONErrorResponse
from arches.app.utils.skos import SKOSWriter, SKOSReader
from arches.app.views.base import BaseManagerView
@method_decorator(group_required("RDM Administrator"), name="dispatch")
class RDMView(BaseManagerView):
def get(self, request, conceptid):
lang = request.GET.get("lang", request.LANGUAGE_CODE)
languages = sort_languages(models.Language.objects.all(), lang)
concept_schemes = []
for concept in models.Concept.objects.filter(nodetype="ConceptScheme"):
concept_schemes.append(Concept().get(id=concept.pk, include=["label"]).get_preflabel(lang=lang))
collections = []
for concept in models.Concept.objects.filter(nodetype="Collection"):
collections.append(Concept().get(id=concept.pk, include=["label"]).get_preflabel(lang=lang))
context = self.get_context_data(
main_script="rdm",
active_page="RDM",
languages=languages,
conceptid=conceptid,
concept_schemes=concept_schemes,
collections=collections,
CORE_CONCEPTS=CORE_CONCEPTS,
)
context["nav"]["icon"] = "fa fa-align-left"
context["nav"]["title"] = _("Reference Data Manager")
context["nav"]["help"] = {"title": _("Using the RDM"), "template": "rdm-help"}
return render(request, "rdm.htm", context)
def get_sparql_providers(endpoint=None):
sparql_providers = {}
for provider in settings.SPARQL_ENDPOINT_PROVIDERS:
provider_class = provider["SPARQL_ENDPOINT_PROVIDER"][settings.LANGUAGE_CODE]["value"]
Provider = import_string(provider_class)()
sparql_providers[Provider.endpoint] = Provider
if endpoint:
return sparql_providers[endpoint]
else:
return sparql_providers
def sort_languages(languages, lang):
"""
Sorts languages from the d_languages model by name. If there is more than one
default language or no default language, the default language is defined by lang
"""
if len([l for l in languages if l.isdefault == True]) != 1:
for l in languages:
if l.code == lang:
l.isdefault = True
else:
l.isdefault = False
return sorted(languages, key=lambda x: x.name)
@group_required("RDM Administrator")
def concept(request, conceptid):
f = request.GET.get("f", "json")
mode = request.GET.get("mode", "")
lang = request.GET.get("lang", request.LANGUAGE_CODE)
pretty = request.GET.get("pretty", False)
if request.method == "GET":
include_subconcepts = request.GET.get("include_subconcepts", "true") == "true"
include_parentconcepts = request.GET.get("include_parentconcepts", "true") == "true"
include_relatedconcepts = request.GET.get("include_relatedconcepts", "true") == "true"
emulate_elastic_search = request.GET.get("emulate_elastic_search", "false") == "true"
depth_limit = request.GET.get("depth_limit", None)
depth_limit = 1
if not conceptid:
return render(
request,
"views/rdm/concept-report.htm",
{
"lang": lang,
"concept_count": models.Concept.objects.filter(nodetype="Concept").count(),
"collection_count": models.Concept.objects.filter(nodetype="Collection").count(),
"scheme_count": models.Concept.objects.filter(nodetype="ConceptScheme").count(),
"entitytype_count": models.Concept.objects.filter(nodetype="EntityType").count(),
"default_report": True,
},
)
labels = []
concept_graph = Concept().get(
id=conceptid,
include_subconcepts=include_subconcepts,
include_parentconcepts=include_parentconcepts,
include_relatedconcepts=include_relatedconcepts,
depth_limit=depth_limit,
up_depth_limit=None,
lang=lang,
semantic=(mode == "semantic" or mode == ""),
)
languages = sort_languages(models.Language.objects.all(), lang)
valuetypes = models.DValueType.objects.all()
relationtypes = models.DRelationType.objects.all()
prefLabel = concept_graph.get_preflabel(lang=lang)
for subconcept in concept_graph.subconcepts:
subconcept.prefLabel = subconcept.get_preflabel(lang=lang)
for relatedconcept in concept_graph.relatedconcepts:
relatedconcept.prefLabel = relatedconcept.get_preflabel(lang=lang)
for value in concept_graph.values:
if value.category == "label":
labels.append(value)
if value.type == "image":
value.full_image_url = (
(settings.FORCE_SCRIPT_NAME if settings.FORCE_SCRIPT_NAME is not None else "") + settings.MEDIA_URL + value.value
).replace("//", "/")
if (mode == "semantic" or mode == "") and (
concept_graph.nodetype == "Concept" or concept_graph.nodetype == "ConceptScheme" or concept_graph.nodetype == "EntityType"
):
if concept_graph.nodetype == "ConceptScheme":
parent_relations = relationtypes.filter(category="Properties")
else:
parent_relations = (
relationtypes.filter(category="Semantic Relations")
.exclude(relationtype="related")
.exclude(relationtype="broader")
.exclude(relationtype="broaderTransitive")
)
return render(
request,
"views/rdm/concept-report.htm",
{
"FORCE_SCRIPT_NAME": settings.FORCE_SCRIPT_NAME,
"lang": lang,
"prefLabel": prefLabel,
"labels": labels,
"concept": concept_graph,
"languages": languages,
"sparql_providers": get_sparql_providers(),
"valuetype_labels": valuetypes.filter(category="label"),
"valuetype_notes": valuetypes.filter(category="note"),
"valuetype_related_values": valuetypes.filter(category__in=["undefined", "identifiers"]),
"parent_relations": parent_relations,
"related_relations": relationtypes.filter(Q(category="Mapping Properties") | Q(relationtype="related")),
"concept_paths": concept_graph.get_paths(lang=lang),
"graph_json": JSONSerializer().serialize(concept_graph.get_node_and_links(lang=lang)),
"direct_parents": [parent.get_preflabel(lang=lang) for parent in concept_graph.parentconcepts],
},
)
elif mode == "collections":
return render(
request,
"views/rdm/entitytype-report.htm",
{
"lang": lang,
"prefLabel": prefLabel,
"labels": labels,
"concept": concept_graph,
"languages": languages,
"valuetype_labels": valuetypes.filter(category="label"),
"valuetype_notes": valuetypes.filter(category="note"),
"valuetype_related_values": valuetypes.filter(category__in=["undefined", "identifiers"]),
"related_relations": relationtypes.filter(relationtype="member"),
"concept_paths": concept_graph.get_paths(lang=lang),
},
)
if request.method == "POST":
if len(request.FILES) > 0:
skosfile = request.FILES.get("skosfile", None)
imagefile = request.FILES.get("file", None)
if imagefile:
value = models.FileValue(
valueid=str(uuid.uuid4()),
value=request.FILES.get("file", None),
concept_id=conceptid,
valuetype_id="image",
language_id=lang,
)
value.save()
return JSONResponse(value)
elif skosfile:
overwrite_options = request.POST.get("overwrite_options", None)
staging_options = request.POST.get("staging_options", None)
skos = SKOSReader()
try:
rdf = skos.read_file(skosfile)
ret = skos.save_concepts_from_skos(rdf, overwrite_options, staging_options)
return JSONResponse(ret)
except Exception as e:
return JSONErrorResponse(_('Unable to Load SKOS File'), _('There was an issue saving the contents of the file to Arches. ') + str(e))
else:
data = JSONDeserializer().deserialize(request.body)
if data:
with transaction.atomic():
concept = Concept(data)
concept.save()
concept.index()
return JSONResponse(concept)
if request.method == "DELETE":
data = JSONDeserializer().deserialize(request.body)
if data:
with transaction.atomic():
concept = Concept(data)
delete_self = data["delete_self"] if "delete_self" in data else False
if not (delete_self and concept.id in CORE_CONCEPTS):
if concept.nodetype == "Collection":
concept.delete(delete_self=delete_self)
else:
in_use = False
if delete_self:
check_concept = Concept().get(data["id"], include_subconcepts=True)
in_use = check_concept.check_if_concept_in_use()
if "subconcepts" in data:
for subconcept in data["subconcepts"]:
if in_use == False:
check_concept = Concept().get(subconcept["id"], include_subconcepts=True)
in_use = check_concept.check_if_concept_in_use()
if in_use == False:
concept.delete_index(delete_self=delete_self)
concept.delete(delete_self=delete_self)
else:
return JSONErrorResponse(_('Unable to Delete'), _('This concept or one of its subconcepts is already in use by an existing resource.'), {"in_use": in_use})
return JSONResponse(concept)
return HttpResponseNotFound()
def export(request, conceptid):
concept_graphs = [
Concept().get(
id=conceptid,
include_subconcepts=True,
include_parentconcepts=False,
include_relatedconcepts=True,
depth_limit=None,
up_depth_limit=None,
)
]
skos = SKOSWriter()
return HttpResponse(skos.write(concept_graphs, format="pretty-xml"), content_type="application/xml")
def export_collections(request):
concept_graphs = []
for concept in models.Concept.objects.filter(nodetype_id="Collection"):
concept_graphs.append(
Concept().get(
id=concept.pk,
include_subconcepts=True,
include_parentconcepts=False,
include_relatedconcepts=False,
depth_limit=None,
up_depth_limit=None,
semantic=False,
)
)
skos = SKOSWriter()
return HttpResponse(skos.write(concept_graphs, format="pretty-xml"), content_type="application/xml")
def get_concept_collections(request):
lang = request.GET.get("lang", request.LANGUAGE_CODE)
concept_collections = Concept().concept_tree(mode="collections", lang=lang)
return JSONResponse(concept_collections)
@group_required("RDM Administrator")
def make_collection(request, conceptid):
concept = Concept().get(id=conceptid, values=[])
try:
collection_concept = concept.make_collection()
return JSONResponse({'collection': collection_concept, 'title': _('Success'), 'message': _('Collection successfully created from the selected concept')})
except:
return JSONErrorResponse(_('Unable to Make Collection'), _('Unable to make a collection from the selected concept.'))
@group_required("RDM Administrator")
def manage_parents(request, conceptid):
if request.method == "POST":
json = request.body
if json is not None:
data = JSONDeserializer().deserialize(json)
with transaction.atomic():
if len(data["deleted"]) > 0:
concept = Concept().get(id=conceptid, include=None)
for deleted in data["deleted"]:
concept.addparent(deleted)
concept.delete()
concept.bulk_index()
if len(data["added"]) > 0:
concept = Concept().get(id=conceptid)
for added in data["added"]:
concept.addparent(added)
concept.save()
concept.bulk_index()
return JSONResponse(data)
else:
return HttpResponseNotAllowed(["POST"])
return HttpResponseNotFound()
def confirm_delete(request, conceptid):
lang = request.GET.get("lang", request.LANGUAGE_CODE)
concept = Concept().get(id=conceptid)
concepts_to_delete = [
concept.get_preflabel(lang=lang).value for key, concept in Concept.gather_concepts_to_delete(concept, lang=lang).items()
]
# return HttpResponse('<div>Showing only 50 of
# %s concepts</div><ul><li>%s</ul>' % (len(concepts_to_delete), '<li>'.join(concepts_to_delete[:50]) + ''))
return HttpResponse("<ul><li>%s</ul>" % ("<li>".join(concepts_to_delete) + ""))
def dropdown(request):
conceptid = request.GET.get("conceptid")
results = Concept().get_e55_domain(conceptid)
return JSONResponse(results)
def paged_dropdown(request):
conceptid = request.GET.get("conceptid")
query = request.GET.get("query", None)
query = None if query == "" else query
page = int(request.GET.get("page", 1))
limit = 50
offset = (page - 1) * limit
results = Concept().get_child_collections_hierarchically(conceptid, offset=offset, limit=limit, query=query)
total_count = results[0][3] if len(results) > 0 else 0
data = [dict(list(zip(["valueto", "depth", "collector"], d))) for d in results]
data = [
dict(list(zip(["id", "text", "conceptid", "language", "type"], d["valueto"].values())), depth=d["depth"], collector=d["collector"])
for d in data
]
# This try/except block tries to find an exact match to the concept the user is searching for and, if found,
# inserts it into the results as the first item so that users don't have to scroll to find it.
# See: https://github.com/archesproject/arches/issues/8355
try:
if page == 1:
found = False
for i, d in enumerate(data):
if i <= 7 and d["text"].lower() == query.lower():
found = True
break
if not found:
sql = """
SELECT value, valueid
FROM
(
SELECT *, CASE WHEN LOWER(languageid) = '{languageid}' THEN 10
WHEN LOWER(languageid) like '{short_languageid}%' THEN 5
ELSE 0
END score
FROM values
) as vals
WHERE LOWER(value)='{query}' AND score > 0
AND valuetype in ('prefLabel')
ORDER BY score desc limit 1
"""
languageid = get_language().lower()
sql = sql.format(query=query.lower(), languageid=languageid, short_languageid=languageid.split("-")[0])
cursor = connection.cursor()
cursor.execute(sql)
rows = cursor.fetchall()
if len(rows) == 1:
data.insert(0, {"id": str(rows[0][1]), "text": rows[0][0], "depth": 1, "collector": False})
except:
pass
return JSONResponse({"results": data, "more": offset + limit < total_count})
def get_pref_label(request):
valueid = request.GET.get("valueid")
label = get_preflabel_from_valueid(valueid, request.LANGUAGE_CODE)
return JSONResponse(label)
def search(request):
searchString = request.GET["q"]
removechildren = request.GET.get("removechildren", None)
query = Query(se, start=0, limit=100)
phrase = Match(field="value", query=searchString.lower(), type="phrase_prefix")
query.add_query(phrase)
results = query.search(index=CONCEPTS_INDEX)
ids = []
if removechildren is not None:
ids = [concept[0] for concept in Concept().get_child_concepts(removechildren, columns="conceptidto::text")]
ids.append(removechildren)
newresults = []
cached_scheme_names = {}
for result in results["hits"]["hits"]:
if result["_source"]["conceptid"] not in ids:
# first look to see if we've already retrieved the top concept name
# else look up the top concept name with ES and cache the result
top_concept = result["_source"]["top_concept"]
if top_concept in cached_scheme_names:
result["in_scheme_name"] = cached_scheme_names[top_concept]
else:
query = Query(se, start=0, limit=100)
phrase = Match(field="conceptid", query=top_concept, type="phrase")
query.add_query(phrase)
scheme = query.search(index=CONCEPTS_INDEX)
for label in scheme["hits"]["hits"]:
if label["_source"]["type"] == "prefLabel":
cached_scheme_names[top_concept] = label["_source"]["value"]
result["in_scheme_name"] = label["_source"]["value"]
newresults.append(result)
# Use the db to get the concept context but this is SLOW
# for result in results['hits']['hits']:
# if result['_source']['conceptid'] not in ids:
# concept = Concept().get(id=result['_source']['conceptid'], include_parentconcepts=True)
# pathlist = concept.get_paths()
# result['in_scheme_name'] = pathlist[0][0]['label']
# newresults.append(result)
# def crawl(conceptid, path=[]):
# query = Query(se, start=0, limit=100)
# bool = Bool()
# bool.must(Match(field='conceptto', query=conceptid, type='phrase'))
# bool.must(Match(field='relationtype', query='narrower', type='phrase'))
# query.add_query(bool)
# relations = query.search(index='concept_relations')
# for relation in relations['hits']['hits']:
# path.insert(0, relation)
# crawl(relation['_source']['conceptfrom'], path=path)
# return path
# for result in results['hits']['hits']:
# if result['_source']['conceptid'] not in ids:
# concept_relations = crawl(result['_source']['conceptid'], path=[])
# if len(concept_relations) > 0:
# conceptid = concept_relations[0]['_source']['conceptfrom']
# if conceptid in cached_scheme_names:
# result['in_scheme_name'] = cached_scheme_names[conceptid]
# else:
# result['in_scheme_name'] = get_preflabel_from_conceptid(conceptid, lang=request.LANGUAGE_CODE)['value']
# cached_scheme_names[conceptid] = result['in_scheme_name']
# newresults.append(result)
results["hits"]["hits"] = newresults
return JSONResponse(results)
def add_concepts_from_sparql_endpoint(request, conceptid):
if request.method == "POST":
json = request.body
if json is not None:
data = JSONDeserializer().deserialize(json)
parentconcept = Concept({"id": conceptid, "nodetype": data["model"]["nodetype"]})
if parentconcept.nodetype == "Concept":
relationshiptype = "narrower"
elif parentconcept.nodetype == "ConceptScheme":
relationshiptype = "hasTopConcept"
provider = get_sparql_providers(data["endpoint"])
try:
parentconcept.subconcepts = provider.get_concepts(data["ids"])
except Exception as e:
return HttpResponseServerError(str(e))
for subconcept in parentconcept.subconcepts:
subconcept.relationshiptype = relationshiptype
parentconcept.save()
parentconcept.index()
return JSONResponse(parentconcept, indent=4)
else:
return HttpResponseNotAllowed(["POST"])
return HttpResponseNotFound()
def search_sparql_endpoint_for_concepts(request):
provider = get_sparql_providers(request.GET.get("endpoint"))
results = provider.search_for_concepts(request.GET.get("terms"))
return JSONResponse(results)
def concept_tree(request, mode):
lang = request.GET.get("lang", request.LANGUAGE_CODE)
conceptid = request.GET.get("node", None)
concepts = Concept({"id": conceptid}).concept_tree(lang=lang, mode=mode)
return JSONResponse(concepts, indent=4)
def concept_value(request):
if request.method == "DELETE":
data = JSONDeserializer().deserialize(request.body)
if data:
with transaction.atomic():
value = ConceptValue(data)
value.delete_index()
value.delete()
return JSONResponse(value)
if request.method == "GET":
valueid = request.GET.get("valueid")
value = models.Value.objects.get(pk=valueid)
return JSONResponse(value)
return HttpResponseNotFound()
|
GHSA-gmpq-xrxj-xh8m
|
cps/helper.py
|
@@ -734,10 +734,10 @@ def save_cover_from_url(url, book_path):
if not cli.allow_localhost:
# 127.0.x.x, localhost, [::1], [::ffff:7f00:1]
ip = socket.getaddrinfo(urlparse(url).hostname, 0)[0][4][0]
- if ip.startswith("127.") or ip.startswith('::ffff:7f') or ip == "::1":
+ if ip.startswith("127.") or ip.startswith('::ffff:7f') or ip == "::1" or ip == "0.0.0.0" or ip == "::":
log.error("Localhost was accessed for cover upload")
return False, _("You are not allowed to access localhost for cover uploads")
- img = requests.get(url, timeout=(10, 200)) # ToDo: Error Handling
+ img = requests.get(url, timeout=(10, 200), allow_redirects=False) # ToDo: Error Handling
img.raise_for_status()
return save_cover(img, book_path)
except (socket.gaierror,
|
# -*- coding: utf-8 -*-
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2012-2019 cervinko, idalin, SiphonSquirrel, ouzklcn, akushsky,
# OzzieIsaacs, bodybybuddha, jkrehm, matthazinski, janeczku
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import io
import mimetypes
import re
import shutil
import socket
import unicodedata
from datetime import datetime, timedelta
from tempfile import gettempdir
from urllib.parse import urlparse
import requests
from babel.dates import format_datetime
from babel.units import format_unit
from flask import send_from_directory, make_response, redirect, abort, url_for
from flask_babel import gettext as _
from flask_login import current_user
from sqlalchemy.sql.expression import true, false, and_, text, func
from sqlalchemy.exc import InvalidRequestError, OperationalError
from werkzeug.datastructures import Headers
from werkzeug.security import generate_password_hash
from markupsafe import escape
from urllib.parse import quote
try:
import unidecode
use_unidecode = True
except ImportError:
use_unidecode = False
from . import calibre_db, cli
from .tasks.convert import TaskConvert
from . import logger, config, get_locale, db, ub, kobo_sync_status
from . import gdriveutils as gd
from .constants import STATIC_DIR as _STATIC_DIR
from .subproc_wrapper import process_wait
from .services.worker import WorkerThread, STAT_WAITING, STAT_FAIL, STAT_STARTED, STAT_FINISH_SUCCESS
from .tasks.mail import TaskEmail
log = logger.create()
try:
from wand.image import Image
from wand.exceptions import MissingDelegateError, BlobError
use_IM = True
except (ImportError, RuntimeError) as e:
log.debug('Cannot import Image, generating covers from non jpg files will not work: %s', e)
use_IM = False
MissingDelegateError = BaseException
# Convert existing book entry to new format
def convert_book_format(book_id, calibrepath, old_book_format, new_book_format, user_id, kindle_mail=None):
book = calibre_db.get_book(book_id)
data = calibre_db.get_book_format(book.id, old_book_format)
if not data:
error_message = _(u"%(format)s format not found for book id: %(book)d", format=old_book_format, book=book_id)
log.error("convert_book_format: %s", error_message)
return error_message
file_path = os.path.join(calibrepath, book.path, data.name)
if config.config_use_google_drive:
if not gd.getFileFromEbooksFolder(book.path, data.name + "." + old_book_format.lower()):
error_message = _(u"%(format)s not found on Google Drive: %(fn)s",
format=old_book_format, fn=data.name + "." + old_book_format.lower())
return error_message
else:
if not os.path.exists(file_path + "." + old_book_format.lower()):
error_message = _(u"%(format)s not found: %(fn)s",
format=old_book_format, fn=data.name + "." + old_book_format.lower())
return error_message
# read settings and append converter task to queue
if kindle_mail:
settings = config.get_mail_settings()
settings['subject'] = _('Send to Kindle') # pretranslate Subject for e-mail
settings['body'] = _(u'This e-mail has been sent via Calibre-Web.')
else:
settings = dict()
link = '<a href="{}">{}</a>'.format(url_for('web.show_book', book_id=book.id), escape(book.title)) # prevent xss
txt = u"{} -> {}: {}".format(
old_book_format.upper(),
new_book_format.upper(),
link)
settings['old_book_format'] = old_book_format
settings['new_book_format'] = new_book_format
WorkerThread.add(user_id, TaskConvert(file_path, book.id, txt, settings, kindle_mail, user_id))
return None
def send_test_mail(kindle_mail, user_name):
WorkerThread.add(user_name, TaskEmail(_(u'Calibre-Web test e-mail'), None, None,
config.get_mail_settings(), kindle_mail, _(u"Test e-mail"),
_(u'This e-mail has been sent via Calibre-Web.')))
return
# Send registration email or password reset email, depending on parameter resend (False means welcome email)
def send_registration_mail(e_mail, user_name, default_password, resend=False):
txt = "Hello %s!\r\n" % user_name
if not resend:
txt += "Your new account at Calibre-Web has been created. Thanks for joining us!\r\n"
txt += "Please log in to your account using the following informations:\r\n"
txt += "User name: %s\r\n" % user_name
txt += "Password: %s\r\n" % default_password
txt += "Don't forget to change your password after first login.\r\n"
txt += "Sincerely\r\n\r\n"
txt += "Your Calibre-Web team"
WorkerThread.add(None, TaskEmail(
subject=_(u'Get Started with Calibre-Web'),
filepath=None,
attachment=None,
settings=config.get_mail_settings(),
recipient=e_mail,
taskMessage=_(u"Registration e-mail for user: %(name)s", name=user_name),
text=txt
))
return
def check_send_to_kindle_with_converter(formats):
bookformats = list()
if 'EPUB' in formats and 'MOBI' not in formats:
bookformats.append({'format': 'Mobi',
'convert': 1,
'text': _('Convert %(orig)s to %(format)s and send to Kindle',
orig='Epub',
format='Mobi')})
if 'AZW3' in formats and not 'MOBI' in formats:
bookformats.append({'format': 'Mobi',
'convert': 2,
'text': _('Convert %(orig)s to %(format)s and send to Kindle',
orig='Azw3',
format='Mobi')})
return bookformats
def check_send_to_kindle(entry):
"""
returns all available book formats for sending to Kindle
"""
formats = list()
bookformats = list()
if len(entry.data):
for ele in iter(entry.data):
if ele.uncompressed_size < config.mail_size:
formats.append(ele.format)
if 'MOBI' in formats:
bookformats.append({'format': 'Mobi',
'convert': 0,
'text': _('Send %(format)s to Kindle', format='Mobi')})
if 'PDF' in formats:
bookformats.append({'format': 'Pdf',
'convert': 0,
'text': _('Send %(format)s to Kindle', format='Pdf')})
if 'AZW' in formats:
bookformats.append({'format': 'Azw',
'convert': 0,
'text': _('Send %(format)s to Kindle', format='Azw')})
if config.config_converterpath:
bookformats.extend(check_send_to_kindle_with_converter(formats))
return bookformats
else:
log.error(u'Cannot find book entry %d', entry.id)
return None
# Check if a reader exists for any of the book formats; if not, return an empty list, otherwise return
# a list with the supported formats
def check_read_formats(entry):
EXTENSIONS_READER = {'TXT', 'PDF', 'EPUB', 'CBZ', 'CBT', 'CBR', 'DJVU'}
bookformats = list()
if len(entry.data):
for ele in iter(entry.data):
if ele.format.upper() in EXTENSIONS_READER:
bookformats.append(ele.format.lower())
return bookformats
# Files are processed in the following order/priority:
# 1: If a Mobi file exists, it is sent directly to the kindle email,
# 2: If an Epub file exists, it is converted and then sent to the kindle email,
# 3: If a Pdf file exists, it is sent directly to the kindle email
def send_mail(book_id, book_format, convert, kindle_mail, calibrepath, user_id):
"""Send email with attachments"""
book = calibre_db.get_book(book_id)
if convert == 1:
# returns None if success, otherwise errormessage
return convert_book_format(book_id, calibrepath, u'epub', book_format.lower(), user_id, kindle_mail)
if convert == 2:
# returns None if success, otherwise errormessage
return convert_book_format(book_id, calibrepath, u'azw3', book_format.lower(), user_id, kindle_mail)
for entry in iter(book.data):
if entry.format.upper() == book_format.upper():
converted_file_name = entry.name + '.' + book_format.lower()
link = '<a href="{}">{}</a>'.format(url_for('web.show_book', book_id=book_id), escape(book.title))
EmailText = _(u"%(book)s send to Kindle", book=link)
WorkerThread.add(user_id, TaskEmail(_(u"Send to Kindle"), book.path, converted_file_name,
config.get_mail_settings(), kindle_mail,
EmailText, _(u'This e-mail has been sent via Calibre-Web.')))
return
return _(u"The requested file could not be read. Maybe wrong permissions?")
def get_valid_filename(value, replace_whitespace=True, chars=128):
"""
Returns the given string converted to a string that can be used for a clean
filename. Limits the number of characters to 128 by default.
"""
if value[-1:] == u'.':
value = value[:-1]+u'_'
value = value.replace("/", "_").replace(":", "_").strip('\0')
if use_unidecode:
if config.config_unicode_filename:
value = (unidecode.unidecode(value))
else:
value = value.replace(u'§', u'SS')
value = value.replace(u'ß', u'ss')
value = unicodedata.normalize('NFKD', value)
re_slugify = re.compile(r'[\W\s-]', re.UNICODE)
value = re_slugify.sub('', value)
if replace_whitespace:
# *+:\"/<>? are replaced by _
value = re.sub(r'[*+:\\\"/<>?]+', u'_', value, flags=re.U)
# pipe has to be replaced with comma
value = re.sub(r'[|]+', u',', value, flags=re.U)
value = value[:chars].strip()
if not value:
raise ValueError("Filename cannot be empty")
return value
def split_authors(values):
authors_list = []
for value in values:
authors = re.split('[&;]', value)
for author in authors:
commas = author.count(',')
if commas == 1:
author_split = author.split(',')
authors_list.append(author_split[1].strip() + ' ' + author_split[0].strip())
elif commas > 1:
authors_list.extend([x.strip() for x in author.split(',')])
else:
authors_list.append(author.strip())
return authors_list
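# Illustrative examples (assumed inputs, not taken from the code base):
#   split_authors(["Doe, John & Smith, Jane"])  -> ["John Doe", "Jane Smith"]
#   split_authors(["One, Two, Three"])          -> ["One", "Two", "Three"]
#   split_authors(["Jane Austen; Mark Twain"])  -> ["Jane Austen", "Mark Twain"]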
def get_sorted_author(value):
try:
if ',' not in value:
regexes = [r"^(JR|SR)\.?$", r"^I{1,3}\.?$", r"^IV\.?$"]
combined = "(" + ")|(".join(regexes) + ")"
value = value.split(" ")
if re.match(combined, value[-1].upper()):
if len(value) > 1:
value2 = value[-2] + ", " + " ".join(value[:-2]) + " " + value[-1]
else:
value2 = value[0]
elif len(value) == 1:
value2 = value[0]
else:
value2 = value[-1] + ", " + " ".join(value[:-1])
else:
value2 = value
except Exception as ex:
log.error("Sorting author %s failed: %s", value, ex)
if isinstance(value, list):  # value may already have been split into words above
value2 = value[0]
else:
value2 = value
return value2
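# Illustrative examples (assumed inputs): the sort name moves the last name to the front,
# keeping generational suffixes attached to it:
#   get_sorted_author("John Smith")             -> "Smith, John"
#   get_sorted_author("Martin Luther King JR")  -> "King, Martin Luther JR"
#   get_sorted_author("Smith, John")            -> "Smith, John"  (already sorted, returned unchanged)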
def edit_book_read_status(book_id, read_status=None):
if not config.config_read_column:
book = ub.session.query(ub.ReadBook).filter(and_(ub.ReadBook.user_id == int(current_user.id),
ub.ReadBook.book_id == book_id)).first()
if book:
if read_status is None:
if book.read_status == ub.ReadBook.STATUS_FINISHED:
book.read_status = ub.ReadBook.STATUS_UNREAD
else:
book.read_status = ub.ReadBook.STATUS_FINISHED
else:
book.read_status = ub.ReadBook.STATUS_FINISHED if read_status else ub.ReadBook.STATUS_UNREAD
else:
readBook = ub.ReadBook(user_id=current_user.id, book_id = book_id)
readBook.read_status = ub.ReadBook.STATUS_FINISHED
book = readBook
if not book.kobo_reading_state:
kobo_reading_state = ub.KoboReadingState(user_id=current_user.id, book_id=book_id)
kobo_reading_state.current_bookmark = ub.KoboBookmark()
kobo_reading_state.statistics = ub.KoboStatistics()
book.kobo_reading_state = kobo_reading_state
ub.session.merge(book)
ub.session_commit("Book {} readbit toggled".format(book_id))
else:
try:
calibre_db.update_title_sort(config)
book = calibre_db.get_filtered_book(book_id)
read_status = getattr(book, 'custom_column_' + str(config.config_read_column))
if len(read_status):
if read_status is None:
read_status[0].value = not read_status[0].value
else:
read_status[0].value = read_status is True
calibre_db.session.commit()
else:
cc_class = db.cc_classes[config.config_read_column]
new_cc = cc_class(value=read_status or 1, book=book_id)
calibre_db.session.add(new_cc)
calibre_db.session.commit()
except (KeyError, AttributeError):
log.error(u"Custom Column No.%d is not existing in calibre database", config.config_read_column)
return "Custom Column No.{} is not existing in calibre database".format(config.config_read_column)
except (OperationalError, InvalidRequestError) as e:
calibre_db.session.rollback()
log.error(u"Read status could not set: {}".format(e))
return "Read status could not set: {}".format(e), 400
return ""
# Deletes a book from the local filestorage, returns True if deleting is successful, otherwise False
def delete_book_file(book, calibrepath, book_format=None):
# check that path is 2 elements deep, check that target path has no subfolders
if book.path.count('/') == 1:
path = os.path.join(calibrepath, book.path)
if book_format:
for file in os.listdir(path):
if file.upper().endswith("."+book_format):
os.remove(os.path.join(path, file))
return True, None
else:
if os.path.isdir(path):
try:
for root, folders, files in os.walk(path):
for f in files:
os.unlink(os.path.join(root, f))
if len(folders):
log.warning("Deleting book {} failed, path {} has subfolders: {}".format(book.id,
book.path, folders))
return True, _("Deleting bookfolder for book %(id)s failed, path has subfolders: %(path)s",
id=book.id,
path=book.path)
shutil.rmtree(path)
except (IOError, OSError) as e:
log.error("Deleting book %s failed: %s", book.id, e)
return False, _("Deleting book %(id)s failed: %(message)s", id=book.id, message=e)
authorpath = os.path.join(calibrepath, os.path.split(book.path)[0])
if not os.listdir(authorpath):
try:
shutil.rmtree(authorpath)
except (IOError, OSError) as e:
log.error("Deleting authorpath for book %s failed: %s", book.id, e)
return True, None
log.error("Deleting book %s from database only, book path in database not valid: %s",
book.id, book.path)
return True, _("Deleting book %(id)s from database only, book path in database not valid: %(path)s",
id=book.id,
path=book.path)
def clean_author_database(renamed_author, calibre_path="", local_book=None, gdrive=None):
valid_filename_authors = [get_valid_filename(r, chars=96) for r in renamed_author]
for r in renamed_author:
if local_book:
all_books = [local_book]
else:
all_books = calibre_db.session.query(db.Books) \
.filter(db.Books.authors.any(db.Authors.name == r)).all()
for book in all_books:
book_author_path = book.path.split('/')[0]
if book_author_path in valid_filename_authors or local_book:
new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first()
all_new_authordir = get_valid_filename(new_author.name, chars=96)
all_titledir = book.path.split('/')[1]
all_new_path = os.path.join(calibre_path, all_new_authordir, all_titledir)
all_new_name = get_valid_filename(book.title, chars=42) + ' - ' \
+ get_valid_filename(new_author.name, chars=42)
# change location in database to new author/title path
book.path = os.path.join(all_new_authordir, all_titledir).replace('\\', '/')
for file_format in book.data:
if not gdrive:
shutil.move(os.path.normcase(os.path.join(all_new_path,
file_format.name + '.' + file_format.format.lower())),
os.path.normcase(os.path.join(all_new_path,
all_new_name + '.' + file_format.format.lower())))
else:
gFile = gd.getFileFromEbooksFolder(all_new_path,
file_format.name + '.' + file_format.format.lower())
if gFile:
gd.moveGdriveFileRemote(gFile, all_new_name + u'.' + file_format.format.lower())
gd.updateDatabaseOnEdit(gFile['id'], all_new_name + u'.' + file_format.format.lower())
else:
log.error("File {} not found on gdrive"
.format(all_new_path, file_format.name + '.' + file_format.format.lower()))
file_format.name = all_new_name
def rename_all_authors(first_author, renamed_author, calibre_path="", localbook=None, gdrive=False):
# Create new_author_dir from parameter or from database
# Create new title_dir from database and add id
if first_author:
new_authordir = get_valid_filename(first_author, chars=96)
for r in renamed_author:
new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first()
old_author_dir = get_valid_filename(r, chars=96)
new_author_rename_dir = get_valid_filename(new_author.name, chars=96)
if gdrive:
gFile = gd.getFileFromEbooksFolder(None, old_author_dir)
if gFile:
gd.moveGdriveFolderRemote(gFile, new_author_rename_dir)
else:
if os.path.isdir(os.path.join(calibre_path, old_author_dir)):
try:
old_author_path = os.path.join(calibre_path, old_author_dir)
new_author_path = os.path.join(calibre_path, new_author_rename_dir)
shutil.move(os.path.normcase(old_author_path), os.path.normcase(new_author_path))
except (OSError) as ex:
log.error("Rename author from: %s to %s: %s", old_author_path, new_author_path, ex)
log.debug(ex, exc_info=True)
return _("Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
src=old_author_path, dest=new_author_path, error=str(ex))
else:
new_authordir = get_valid_filename(localbook.authors[0].name, chars=96)
return new_authordir
# Moves files in file storage during author/title rename, or from temp dir to file storage
def update_dir_structure_file(book_id, calibre_path, first_author, original_filepath, db_filename, renamed_author):
# get book database entry from id, if original path overwrite source with original_filepath
localbook = calibre_db.get_book(book_id)
if original_filepath:
path = original_filepath
else:
path = os.path.join(calibre_path, localbook.path)
# Create (current) authordir and titledir from database
authordir = localbook.path.split('/')[0]
titledir = localbook.path.split('/')[1]
# Create new_authordir from parameter or from database
# Create new titledir from database and add id
new_authordir = rename_all_authors(first_author, renamed_author, calibre_path, localbook)
if first_author:
if first_author.lower() in [r.lower() for r in renamed_author]:
if os.path.isdir(os.path.join(calibre_path, new_authordir)):
path = os.path.join(calibre_path, new_authordir, titledir)
new_titledir = get_valid_filename(localbook.title, chars=96) + " (" + str(book_id) + ")"
if titledir != new_titledir or authordir != new_authordir or original_filepath:
error = move_files_on_change(calibre_path,
new_authordir,
new_titledir,
localbook,
db_filename,
original_filepath,
path)
if error:
return error
# Rename all files from old names to new names
return rename_files_on_change(first_author, renamed_author, localbook, original_filepath, path, calibre_path)
def upload_new_file_gdrive(book_id, first_author, renamed_author, title, title_dir, original_filepath, filename_ext):
error = False
book = calibre_db.get_book(book_id)
file_name = get_valid_filename(title, chars=42) + ' - ' + \
get_valid_filename(first_author, chars=42) + \
filename_ext
rename_all_authors(first_author, renamed_author, gdrive=True)
gdrive_path = os.path.join(get_valid_filename(first_author, chars=96),
title_dir + " (" + str(book_id) + ")")
book.path = gdrive_path.replace("\\", "/")
gd.uploadFileToEbooksFolder(os.path.join(gdrive_path, file_name).replace("\\", "/"), original_filepath)
error |= rename_files_on_change(first_author, renamed_author, localbook=book, gdrive=True)
return error
def update_dir_structure_gdrive(book_id, first_author, renamed_author):
error = False
book = calibre_db.get_book(book_id)
authordir = book.path.split('/')[0]
titledir = book.path.split('/')[1]
new_authordir = rename_all_authors(first_author, renamed_author, gdrive=True)
new_titledir = get_valid_filename(book.title, chars=96) + u" (" + str(book_id) + u")"
if titledir != new_titledir:
gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), titledir)
if gFile:
gd.moveGdriveFileRemote(gFile, new_titledir)
book.path = book.path.split('/')[0] + u'/' + new_titledir
gd.updateDatabaseOnEdit(gFile['id'], book.path) # only child folder affected
else:
error = _(u'File %(file)s not found on Google Drive', file=book.path) # file not found
if authordir != new_authordir and authordir not in renamed_author:
gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), new_titledir)
if gFile:
gd.moveGdriveFolderRemote(gFile, new_authordir)
book.path = new_authordir + u'/' + book.path.split('/')[1]
gd.updateDatabaseOnEdit(gFile['id'], book.path)
else:
error = _(u'File %(file)s not found on Google Drive', file=authordir) # file not found
# change location in database to new author/title path
book.path = os.path.join(new_authordir, new_titledir).replace('\\', '/')
error |= rename_files_on_change(first_author, renamed_author, book, gdrive=True)
return error
def move_files_on_change(calibre_path, new_authordir, new_titledir, localbook, db_filename, original_filepath, path):
new_path = os.path.join(calibre_path, new_authordir, new_titledir)
new_name = get_valid_filename(localbook.title, chars=96) + ' - ' + new_authordir
try:
if original_filepath:
if not os.path.isdir(new_path):
os.makedirs(new_path)
shutil.move(os.path.normcase(original_filepath), os.path.normcase(os.path.join(new_path, db_filename)))
log.debug("Moving title: %s to %s/%s", original_filepath, new_path, new_name)
else:
# Check if the new path does not exist yet
if not os.path.exists(new_path):
# move original path to new path
log.debug("Moving title: %s to %s", path, new_path)
shutil.move(os.path.normcase(path), os.path.normcase(new_path))
else: # path is valid copy only files to new location (merge)
log.info("Moving title: %s into existing: %s", path, new_path)
# Take all files and subfolder from old path (strange command)
for dir_name, __, file_list in os.walk(path):
for file in file_list:
shutil.move(os.path.normcase(os.path.join(dir_name, file)),
os.path.normcase(os.path.join(new_path + dir_name[len(path):], file)))
# change location in database to new author/title path
localbook.path = os.path.join(new_authordir, new_titledir).replace('\\','/')
except OSError as ex:
log.error("Rename title from: %s to %s: %s", path, new_path, ex)
log.debug(ex, exc_info=True)
return _("Rename title from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
src=path, dest=new_path, error=str(ex))
return False
def rename_files_on_change(first_author,
renamed_author,
localbook,
orignal_filepath="",
path="",
calibre_path="",
gdrive=False):
# Rename all files from old names to new names
try:
clean_author_database(renamed_author, calibre_path, gdrive=gdrive)
if first_author and first_author not in renamed_author:
clean_author_database([first_author], calibre_path, localbook, gdrive)
if not gdrive and not renamed_author and not orignal_filepath and len(os.listdir(os.path.dirname(path))) == 0:
shutil.rmtree(os.path.dirname(path))
except (OSError, FileNotFoundError) as ex:
log.error("Error in rename file in path %s", ex)
log.debug(ex, exc_info=True)
return _("Error in rename file in path: %(error)s", error=str(ex))
return False
def delete_book_gdrive(book, book_format):
error = None
if book_format:
name = ''
for entry in book.data:
if entry.format.upper() == book_format:
name = entry.name + '.' + book_format
gFile = gd.getFileFromEbooksFolder(book.path, name)
else:
gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), book.path.split('/')[1])
if gFile:
gd.deleteDatabaseEntry(gFile['id'])
gFile.Trash()
else:
error = _(u'Book path %(path)s not found on Google Drive', path=book.path) # file not found
return error is None, error
def reset_password(user_id):
existing_user = ub.session.query(ub.User).filter(ub.User.id == user_id).first()
if not existing_user:
return 0, None
if not config.get_mail_server_configured():
return 2, None
try:
password = generate_random_password()
existing_user.password = generate_password_hash(password)
ub.session.commit()
send_registration_mail(existing_user.email, existing_user.name, password, True)
return 1, existing_user.name
except Exception:
ub.session.rollback()
return 0, None
def generate_random_password():
s = "abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%&*()?"
passlen = 8
return "".join(s[c % len(s)] for c in os.urandom(passlen))
def uniq(inpt):
output = []
inpt = [ " ".join(inp.split()) for inp in inpt]
for x in inpt:
if x not in output:
output.append(x)
return output
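# Illustrative example (assumed input): whitespace is collapsed before de-duplicating,
# so differently spaced duplicates fold together:
#   uniq(["John  Doe", "John Doe", "Jane Doe"])  ->  ["John Doe", "Jane Doe"]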
def check_email(email):
email = valid_email(email)
if ub.session.query(ub.User).filter(func.lower(ub.User.email) == email.lower()).first():
log.error(u"Found an existing account for this e-mail address")
raise Exception(_(u"Found an existing account for this e-mail address"))
return email
def check_username(username):
username = username.strip()
if ub.session.query(ub.User).filter(func.lower(ub.User.name) == username.lower()).scalar():
log.error(u"This username is already taken")
raise Exception (_(u"This username is already taken"))
return username
def valid_email(email):
email = email.strip()
# Regex according to https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/email#validation
if not re.search(r"^[\w.!#$%&'*+\\/=?^_`{|}~-]+@[\w](?:[\w-]{0,61}[\w])?(?:\.[\w](?:[\w-]{0,61}[\w])?)*$",
email):
log.error(u"Invalid e-mail address format")
raise Exception(_(u"Invalid e-mail address format"))
return email
# ################################# External interface #################################
def update_dir_structure(book_id,
calibre_path,
first_author=None, # change author of book to this author
original_filepath=None,
db_filename=None,
renamed_author=None):
renamed_author = renamed_author or []
if config.config_use_google_drive:
return update_dir_structure_gdrive(book_id, first_author, renamed_author)
else:
return update_dir_structure_file(book_id,
calibre_path,
first_author,
original_filepath,
db_filename, renamed_author)
def delete_book(book, calibrepath, book_format):
if config.config_use_google_drive:
return delete_book_gdrive(book, book_format)
else:
return delete_book_file(book, calibrepath, book_format)
def get_cover_on_failure(use_generic_cover):
if use_generic_cover:
return send_from_directory(_STATIC_DIR, "generic_cover.jpg")
else:
return None
def get_book_cover(book_id):
book = calibre_db.get_filtered_book(book_id, allow_show_archived=True)
return get_book_cover_internal(book, use_generic_cover_on_failure=True)
def get_book_cover_with_uuid(book_uuid,
use_generic_cover_on_failure=True):
book = calibre_db.get_book_by_uuid(book_uuid)
return get_book_cover_internal(book, use_generic_cover_on_failure)
def get_book_cover_internal(book, use_generic_cover_on_failure):
if book and book.has_cover:
if config.config_use_google_drive:
try:
if not gd.is_gdrive_ready():
return get_cover_on_failure(use_generic_cover_on_failure)
path = gd.get_cover_via_gdrive(book.path)
if path:
return redirect(path)
else:
log.error('%s/cover.jpg not found on Google Drive', book.path)
return get_cover_on_failure(use_generic_cover_on_failure)
except Exception as ex:
log.debug_or_exception(ex)
return get_cover_on_failure(use_generic_cover_on_failure)
else:
cover_file_path = os.path.join(config.config_calibre_dir, book.path)
if os.path.isfile(os.path.join(cover_file_path, "cover.jpg")):
return send_from_directory(cover_file_path, "cover.jpg")
else:
return get_cover_on_failure(use_generic_cover_on_failure)
else:
return get_cover_on_failure(use_generic_cover_on_failure)
# saves book cover from url
def save_cover_from_url(url, book_path):
try:
if not cli.allow_localhost:
# 127.0.x.x, localhost, [::1], [::ffff:7f00:1]
ip = socket.getaddrinfo(urlparse(url).hostname, 0)[0][4][0]
if ip.startswith("127.") or ip.startswith('::ffff:7f') or ip == "::1":
log.error("Localhost was accessed for cover upload")
return False, _("You are not allowed to access localhost for cover uploads")
img = requests.get(url, timeout=(10, 200)) # ToDo: Error Handling
img.raise_for_status()
return save_cover(img, book_path)
except (socket.gaierror,
requests.exceptions.HTTPError,
requests.exceptions.ConnectionError,
requests.exceptions.Timeout) as ex:
log.info(u'Cover Download Error %s', ex)
return False, _("Error Downloading Cover")
except MissingDelegateError as ex:
log.info(u'File Format Error %s', ex)
return False, _("Cover Format Error")
def save_cover_from_filestorage(filepath, saved_filename, img):
# check if file path exists, otherwise create it, copy file to calibre path and delete temp file
if not os.path.exists(filepath):
try:
os.makedirs(filepath)
except OSError:
log.error(u"Failed to create path for cover")
return False, _(u"Failed to create path for cover")
try:
# upload of jpg file without wand
if isinstance(img, requests.Response):
with open(os.path.join(filepath, saved_filename), 'wb') as f:
f.write(img.content)
else:
if hasattr(img, "metadata"):
# upload of jpg/png... via url
img.save(filename=os.path.join(filepath, saved_filename))
img.close()
else:
# upload of jpg/png... from hdd
img.save(os.path.join(filepath, saved_filename))
except (IOError, OSError):
log.error(u"Cover-file is not a valid image file, or could not be stored")
return False, _(u"Cover-file is not a valid image file, or could not be stored")
return True, None
# saves book cover to gdrive or locally
def save_cover(img, book_path):
content_type = img.headers.get('content-type')
if use_IM:
if content_type not in ('image/jpeg', 'image/png', 'image/webp', 'image/bmp'):
log.error("Only jpg/jpeg/png/webp/bmp files are supported as coverfile")
return False, _("Only jpg/jpeg/png/webp/bmp files are supported as coverfile")
# convert to jpg because calibre only supports jpg
if content_type != 'image/jpg':
try:
if hasattr(img, 'stream'):
imgc = Image(blob=img.stream)
else:
imgc = Image(blob=io.BytesIO(img.content))
imgc.format = 'jpeg'
imgc.transform_colorspace("rgb")
img = imgc
except (BlobError, MissingDelegateError):
log.error("Invalid cover file content")
return False, _("Invalid cover file content")
else:
if content_type not in 'image/jpeg':
log.error("Only jpg/jpeg files are supported as coverfile")
return False, _("Only jpg/jpeg files are supported as coverfile")
if config.config_use_google_drive:
tmp_dir = os.path.join(gettempdir(), 'calibre_web')
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
ret, message = save_cover_from_filestorage(tmp_dir, "uploaded_cover.jpg", img)
if ret is True:
gd.uploadFileToEbooksFolder(os.path.join(book_path, 'cover.jpg').replace("\\","/"),
os.path.join(tmp_dir, "uploaded_cover.jpg"))
log.info("Cover is saved on Google Drive")
return True, None
else:
return False, message
else:
return save_cover_from_filestorage(os.path.join(config.config_calibre_dir, book_path), "cover.jpg", img)
def do_download_file(book, book_format, client, data, headers):
if config.config_use_google_drive:
#startTime = time.time()
df = gd.getFileFromEbooksFolder(book.path, data.name + "." + book_format)
#log.debug('%s', time.time() - startTime)
if df:
return gd.do_gdrive_download(df, headers)
else:
abort(404)
else:
filename = os.path.join(config.config_calibre_dir, book.path)
if not os.path.isfile(os.path.join(filename, data.name + "." + book_format)):
# ToDo: improve error handling
log.error('File not found: %s', os.path.join(filename, data.name + "." + book_format))
if client == "kobo" and book_format == "kepub":
headers["Content-Disposition"] = headers["Content-Disposition"].replace(".kepub", ".kepub.epub")
response = make_response(send_from_directory(filename, data.name + "." + book_format))
# ToDo Check headers parameter
for element in headers:
response.headers[element[0]] = element[1]
log.info('Downloading file: {}'.format(os.path.join(filename, data.name + "." + book_format)))
return response
##################################
def check_unrar(unrarLocation):
if not unrarLocation:
return
if not os.path.exists(unrarLocation):
return _('Unrar binary file not found')
try:
unrarLocation = [unrarLocation]
value = process_wait(unrarLocation, pattern='UNRAR (.*) freeware')
if value:
version = value.group(1)
log.debug("unrar version %s", version)
except (OSError, UnicodeDecodeError) as err:
log.debug_or_exception(err)
return _('Error executing UnRar')
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, timedelta):
return {
'__type__': 'timedelta',
'days': obj.days,
'seconds': obj.seconds,
'microseconds': obj.microseconds,
}
raise TypeError("Type %s not serializable" % type(obj))
# helper function for displaying the runtime of tasks
def format_runtime(runtime):
retVal = ""
if runtime.days:
retVal = format_unit(runtime.days, 'duration-day', length="long", locale=get_locale()) + ', '
mins, seconds = divmod(runtime.seconds, 60)
hours, minutes = divmod(mins, 60)
# ToDo: locale.number_symbols._data['timeSeparator'] -> localize time separator ?
if hours:
retVal += '{:d}:{:02d}:{:02d}s'.format(hours, minutes, seconds)
elif minutes:
retVal += '{:2d}:{:02d}s'.format(minutes, seconds)
else:
retVal += '{:2d}s'.format(seconds)
return retVal
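# Illustrative example (assumed input): timedelta(days=1, hours=2, minutes=3, seconds=4)
# renders as "1 day, 2:03:04s" in an English locale; without days or hours the result
# falls back to "MM:SSs" or just "SSs".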
# helper function to localize the status information in tasklist entries
def render_task_status(tasklist):
renderedtasklist = list()
for __, user, __, task in tasklist:
if user == current_user.name or current_user.role_admin():
ret = {}
if task.start_time:
ret['starttime'] = format_datetime(task.start_time, format='short', locale=get_locale())
ret['runtime'] = format_runtime(task.runtime)
# localize the task status
if isinstance(task.stat, int):
if task.stat == STAT_WAITING:
ret['status'] = _(u'Waiting')
elif task.stat == STAT_FAIL:
ret['status'] = _(u'Failed')
elif task.stat == STAT_STARTED:
ret['status'] = _(u'Started')
elif task.stat == STAT_FINISH_SUCCESS:
ret['status'] = _(u'Finished')
else:
ret['status'] = _(u'Unknown Status')
ret['taskMessage'] = "{}: {}".format(_(task.name), task.message)
ret['progress'] = "{} %".format(int(task.progress * 100))
ret['user'] = escape(user) # prevent xss
renderedtasklist.append(ret)
return renderedtasklist
def tags_filters():
negtags_list = current_user.list_denied_tags()
postags_list = current_user.list_allowed_tags()
neg_content_tags_filter = false() if negtags_list == [''] else db.Tags.name.in_(negtags_list)
pos_content_tags_filter = true() if postags_list == [''] else db.Tags.name.in_(postags_list)
return and_(pos_content_tags_filter, ~neg_content_tags_filter)
# checks if domain is in database (including wildcards)
# example SELECT * FROM @TABLE WHERE 'abcdefg' LIKE Name;
# from https://code.luasoftware.com/tutorials/flask/execute-raw-sql-in-flask-sqlalchemy/
# in all calls the email address is checked for validity
def check_valid_domain(domain_text):
sql = "SELECT * FROM registration WHERE (:domain LIKE domain and allow = 1);"
result = ub.session.query(ub.Registration).from_statement(text(sql)).params(domain=domain_text).all()
if not len(result):
return False
sql = "SELECT * FROM registration WHERE (:domain LIKE domain and allow = 0);"
result = ub.session.query(ub.Registration).from_statement(text(sql)).params(domain=domain_text).all()
return not len(result)
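# Illustrative example (assumed table contents): with a registration row ("%@example.com", allow=1)
# and no deny rows, check_valid_domain("user@example.com") is True, because the address matches
# the allow pattern via SQL LIKE and matches no allow=0 pattern.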
def get_cc_columns(filter_config_custom_read=False):
tmpcc = calibre_db.session.query(db.Custom_Columns)\
.filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
cc = []
r = None
if config.config_columns_to_ignore:
r = re.compile(config.config_columns_to_ignore)
for col in tmpcc:
if filter_config_custom_read and config.config_read_column and config.config_read_column == col.id:
continue
if r and r.match(col.name):
continue
cc.append(col)
return cc
def get_download_link(book_id, book_format, client):
book_format = book_format.split(".")[0]
book = calibre_db.get_filtered_book(book_id, allow_show_archived=True)
if book:
data1 = calibre_db.get_book_format(book.id, book_format.upper())
else:
log.error("Book id {} not found for downloading".format(book_id))
abort(404)
if data1:
# collect downloaded books only for registered user and not for anonymous user
if current_user.is_authenticated:
ub.update_download(book_id, int(current_user.id))
file_name = book.title
if len(book.authors) > 0:
file_name = file_name + ' - ' + book.authors[0].name
file_name = get_valid_filename(file_name, replace_whitespace=False)
headers = Headers()
headers["Content-Type"] = mimetypes.types_map.get('.' + book_format, "application/octet-stream")
headers["Content-Disposition"] = "attachment; filename=%s.%s; filename*=UTF-8''%s.%s" % (
quote(file_name.encode('utf-8')), book_format, quote(file_name.encode('utf-8')), book_format)
return do_download_file(book, book_format, client, data1, headers)
else:
abort(404)
|
GHSA-2647-c639-qv2j
|
src/werkzeug/_internal.py
|
@@ -34,7 +34,7 @@
_legal_cookie_chars_re = rb"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_cookie_re = re.compile(
rb"""
- (?P<key>[^=;]+)
+ (?P<key>[^=;]*)
(?:\s*=\s*
(?P<val>
"(?:[^\\"]|\\.)*" |
@@ -382,16 +382,21 @@ def _cookie_parse_impl(b: bytes) -> t.Iterator[t.Tuple[bytes, bytes]]:
"""Lowlevel cookie parsing facility that operates on bytes."""
i = 0
n = len(b)
+ b += b";"
while i < n:
- match = _cookie_re.search(b + b";", i)
+ match = _cookie_re.match(b, i)
+
if not match:
break
- key = match.group("key").strip()
- value = match.group("val") or b""
i = match.end(0)
+ key = match.group("key").strip()
+
+ if not key:
+ continue
+ value = match.group("val") or b""
yield key, _cookie_unquote(value)
|
import logging
import operator
import re
import string
import sys
import typing
import typing as t
from datetime import date
from datetime import datetime
from datetime import timezone
from itertools import chain
from weakref import WeakKeyDictionary
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
from .wrappers.request import Request # noqa: F401
_logger: t.Optional[logging.Logger] = None
_signature_cache = WeakKeyDictionary() # type: ignore
_epoch_ord = date(1970, 1, 1).toordinal()
_legal_cookie_chars = frozenset(
c.encode("ascii")
for c in f"{string.ascii_letters}{string.digits}/=!#$%&'*+-.^_`|~:"
)
_cookie_quoting_map = {b",": b"\\054", b";": b"\\073", b'"': b'\\"', b"\\": b"\\\\"}
for _i in chain(range(32), range(127, 256)):
_cookie_quoting_map[_i.to_bytes(1, sys.byteorder)] = f"\\{_i:03o}".encode("latin1")
_octal_re = re.compile(rb"\\[0-3][0-7][0-7]")
_quote_re = re.compile(rb"[\\].")
_legal_cookie_chars_re = rb"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_cookie_re = re.compile(
rb"""
(?P<key>[^=;]+)
(?:\s*=\s*
(?P<val>
"(?:[^\\"]|\\.)*" |
(?:.*?)
)
)?
\s*;
""",
flags=re.VERBOSE,
)
class _Missing:
def __repr__(self) -> str:
return "no value"
def __reduce__(self) -> str:
return "_missing"
_missing = _Missing()
@typing.overload
def _make_encode_wrapper(reference: str) -> t.Callable[[str], str]:
...
@typing.overload
def _make_encode_wrapper(reference: bytes) -> t.Callable[[str], bytes]:
...
def _make_encode_wrapper(reference: t.AnyStr) -> t.Callable[[str], t.AnyStr]:
"""Create a function that will be called with a string argument. If
the reference is bytes, values will be encoded to bytes.
"""
if isinstance(reference, str):
return lambda x: x
return operator.methodcaller("encode", "latin1")
def _check_str_tuple(value: t.Tuple[t.AnyStr, ...]) -> None:
"""Ensure tuple items are all strings or all bytes."""
if not value:
return
item_type = str if isinstance(value[0], str) else bytes
if any(not isinstance(item, item_type) for item in value):
raise TypeError(f"Cannot mix str and bytes arguments (got {value!r})")
_default_encoding = sys.getdefaultencoding()
def _to_bytes(
x: t.Union[str, bytes], charset: str = _default_encoding, errors: str = "strict"
) -> bytes:
if x is None or isinstance(x, bytes):
return x
if isinstance(x, (bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError("Expected bytes")
@typing.overload
def _to_str( # type: ignore
x: None,
charset: t.Optional[str] = ...,
errors: str = ...,
allow_none_charset: bool = ...,
) -> None:
...
@typing.overload
def _to_str(
x: t.Any,
charset: t.Optional[str] = ...,
errors: str = ...,
allow_none_charset: bool = ...,
) -> str:
...
def _to_str(
x: t.Optional[t.Any],
charset: t.Optional[str] = _default_encoding,
errors: str = "strict",
allow_none_charset: bool = False,
) -> t.Optional[t.Union[str, bytes]]:
if x is None or isinstance(x, str):
return x
if not isinstance(x, (bytes, bytearray)):
return str(x)
if charset is None:
if allow_none_charset:
return x
return x.decode(charset, errors) # type: ignore
def _wsgi_decoding_dance(
s: str, charset: str = "utf-8", errors: str = "replace"
) -> str:
return s.encode("latin1").decode(charset, errors)
def _wsgi_encoding_dance(
s: str, charset: str = "utf-8", errors: str = "replace"
) -> str:
if isinstance(s, bytes):
return s.decode("latin1", errors)
return s.encode(charset).decode("latin1", errors)
def _get_environ(obj: t.Union["WSGIEnvironment", "Request"]) -> "WSGIEnvironment":
env = getattr(obj, "environ", obj)
assert isinstance(
env, dict
), f"{type(obj).__name__!r} is not a WSGI environment (has to be a dict)"
return env
def _has_level_handler(logger: logging.Logger) -> bool:
"""Check if there is a handler in the logging chain that will handle
the given logger's effective level.
"""
level = logger.getEffectiveLevel()
current = logger
while current:
if any(handler.level <= level for handler in current.handlers):
return True
if not current.propagate:
break
current = current.parent # type: ignore
return False
class _ColorStreamHandler(logging.StreamHandler):
"""On Windows, wrap stream with Colorama for ANSI style support."""
def __init__(self) -> None:
try:
import colorama
except ImportError:
stream = None
else:
stream = colorama.AnsiToWin32(sys.stderr)
super().__init__(stream)
def _log(type: str, message: str, *args: t.Any, **kwargs: t.Any) -> None:
"""Log a message to the 'werkzeug' logger.
The logger is created the first time it is needed. If there is no
level set, it is set to :data:`logging.INFO`. If there is no handler
for the logger's effective level, a :class:`logging.StreamHandler`
is added.
"""
global _logger
if _logger is None:
_logger = logging.getLogger("werkzeug")
if _logger.level == logging.NOTSET:
_logger.setLevel(logging.INFO)
if not _has_level_handler(_logger):
_logger.addHandler(_ColorStreamHandler())
getattr(_logger, type)(message.rstrip(), *args, **kwargs)
@typing.overload
def _dt_as_utc(dt: None) -> None:
...
@typing.overload
def _dt_as_utc(dt: datetime) -> datetime:
...
def _dt_as_utc(dt: t.Optional[datetime]) -> t.Optional[datetime]:
if dt is None:
return dt
if dt.tzinfo is None:
return dt.replace(tzinfo=timezone.utc)
elif dt.tzinfo != timezone.utc:
return dt.astimezone(timezone.utc)
return dt
_TAccessorValue = t.TypeVar("_TAccessorValue")
class _DictAccessorProperty(t.Generic[_TAccessorValue]):
"""Baseclass for `environ_property` and `header_property`."""
read_only = False
def __init__(
self,
name: str,
default: t.Optional[_TAccessorValue] = None,
load_func: t.Optional[t.Callable[[str], _TAccessorValue]] = None,
dump_func: t.Optional[t.Callable[[_TAccessorValue], str]] = None,
read_only: t.Optional[bool] = None,
doc: t.Optional[str] = None,
) -> None:
self.name = name
self.default = default
self.load_func = load_func
self.dump_func = dump_func
if read_only is not None:
self.read_only = read_only
self.__doc__ = doc
def lookup(self, instance: t.Any) -> t.MutableMapping[str, t.Any]:
raise NotImplementedError
@typing.overload
def __get__(
self, instance: None, owner: type
) -> "_DictAccessorProperty[_TAccessorValue]":
...
@typing.overload
def __get__(self, instance: t.Any, owner: type) -> _TAccessorValue:
...
def __get__(
self, instance: t.Optional[t.Any], owner: type
) -> t.Union[_TAccessorValue, "_DictAccessorProperty[_TAccessorValue]"]:
if instance is None:
return self
storage = self.lookup(instance)
if self.name not in storage:
return self.default # type: ignore
value = storage[self.name]
if self.load_func is not None:
try:
return self.load_func(value)
except (ValueError, TypeError):
return self.default # type: ignore
return value # type: ignore
def __set__(self, instance: t.Any, value: _TAccessorValue) -> None:
if self.read_only:
raise AttributeError("read only property")
if self.dump_func is not None:
self.lookup(instance)[self.name] = self.dump_func(value)
else:
self.lookup(instance)[self.name] = value
def __delete__(self, instance: t.Any) -> None:
if self.read_only:
raise AttributeError("read only property")
self.lookup(instance).pop(self.name, None)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.name}>"
def _cookie_quote(b: bytes) -> bytes:
buf = bytearray()
all_legal = True
_lookup = _cookie_quoting_map.get
_push = buf.extend
for char_int in b:
char = char_int.to_bytes(1, sys.byteorder)
if char not in _legal_cookie_chars:
all_legal = False
char = _lookup(char, char)
_push(char)
if all_legal:
return bytes(buf)
return bytes(b'"' + buf + b'"')
def _cookie_unquote(b: bytes) -> bytes:
if len(b) < 2:
return b
if b[:1] != b'"' or b[-1:] != b'"':
return b
b = b[1:-1]
i = 0
n = len(b)
rv = bytearray()
_push = rv.extend
while 0 <= i < n:
o_match = _octal_re.search(b, i)
q_match = _quote_re.search(b, i)
if not o_match and not q_match:
rv.extend(b[i:])
break
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j):
_push(b[i:k])
_push(b[k + 1 : k + 2])
i = k + 2
else:
_push(b[i:j])
rv.append(int(b[j + 1 : j + 4], 8))
i = j + 4
return bytes(rv)
def _cookie_parse_impl(b: bytes) -> t.Iterator[t.Tuple[bytes, bytes]]:
"""Lowlevel cookie parsing facility that operates on bytes."""
i = 0
n = len(b)
while i < n:
match = _cookie_re.search(b + b";", i)
if not match:
break
key = match.group("key").strip()
value = match.group("val") or b""
i = match.end(0)
yield key, _cookie_unquote(value)
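# Illustrative sketch (not part of the original module); the exact pair
# splitting depends on _cookie_re, which is defined elsewhere in this file:
#
#     list(_cookie_parse_impl(b'a=1; b="\\054x"'))
#     # roughly -> [(b"a", b"1"), (b"b", b",x")]   (\054 is an octal comma escape)
#     # pairs with an empty key may also be yielded; filtering them out is left
#     # to callers such as parse_cookie() in sansio/http.py.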
def _encode_idna(domain: str) -> bytes:
# If we're given bytes, make sure they fit into ASCII
if isinstance(domain, bytes):
domain.decode("ascii")
return domain
# Otherwise check if it's already ascii, then return
try:
return domain.encode("ascii")
except UnicodeError:
pass
# Otherwise encode each part separately
return b".".join(p.encode("idna") for p in domain.split("."))
def _decode_idna(domain: t.Union[str, bytes]) -> str:
# If the input is a string try to encode it to ascii to do the idna
# decoding. If that fails because of a unicode error, then we
# already have a decoded idna domain.
if isinstance(domain, str):
try:
domain = domain.encode("ascii")
except UnicodeError:
return domain # type: ignore
# Decode each part separately. If a part fails, try to decode it
# with ascii and silently ignore errors. This makes sense because
# the idna codec does not have error handling.
def decode_part(part: bytes) -> str:
try:
return part.decode("idna")
except UnicodeError:
return part.decode("ascii", "ignore")
return ".".join(decode_part(p) for p in domain.split(b"."))
@typing.overload
def _make_cookie_domain(domain: None) -> None:
...
@typing.overload
def _make_cookie_domain(domain: str) -> bytes:
...
def _make_cookie_domain(domain: t.Optional[str]) -> t.Optional[bytes]:
if domain is None:
return None
domain = _encode_idna(domain)
if b":" in domain:
domain = domain.split(b":", 1)[0]
if b"." in domain:
return domain
raise ValueError(
"Setting 'domain' for a cookie on a server running locally (ex: "
"localhost) is not supported by complying browsers. You should "
"have something like: '127.0.0.1 localhost dev.localhost' on "
"your hosts file and then point your server to run on "
"'dev.localhost' and also set 'domain' for 'dev.localhost'"
)
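# Illustrative sketch (not part of the original module):
#
#     _make_cookie_domain(None)                 # -> None
#     _make_cookie_domain("example.com:8443")   # -> b"example.com" (port stripped)
#     _make_cookie_domain("\N{SNOWMAN}.com")    # -> b"xn--n3h.com"
#     _make_cookie_domain("localhost")          # raises ValueError (no dot)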
def _easteregg(app: t.Optional["WSGIApplication"] = None) -> "WSGIApplication":
"""Like the name says. But who knows how it works?"""
def bzzzzzzz(gyver: bytes) -> str:
import base64
import zlib
return zlib.decompress(base64.b64decode(gyver)).decode("ascii")
gyver = "\n".join(
[
x + (77 - len(x)) * " "
for x in bzzzzzzz(
b"""
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t"""
).splitlines()
]
)
def easteregged(
environ: "WSGIEnvironment", start_response: "StartResponse"
) -> t.Iterable[bytes]:
def injecting_start_response(
status: str, headers: t.List[t.Tuple[str, str]], exc_info: t.Any = None
) -> t.Callable[[bytes], t.Any]:
headers.append(("X-Powered-By", "Werkzeug"))
return start_response(status, headers, exc_info)
if app is not None and environ.get("QUERY_STRING") != "macgybarchakku":
return app(environ, injecting_start_response)
injecting_start_response("200 OK", [("Content-Type", "text/html")])
return [
f"""\
<!doctype html>
<html lang=en>
<head>
<title>About Werkzeug</title>
<style type="text/css">
body {{ font: 15px Georgia, serif; text-align: center; }}
a {{ color: #333; text-decoration: none; }}
h1 {{ font-size: 30px; margin: 20px 0 10px 0; }}
p {{ margin: 0 0 30px 0; }}
pre {{ font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }}
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>{gyver}\n\n\n</pre>
</body>
</html>""".encode(
"latin1"
)
]
return easteregged
|
GHSA-px8h-6qxv-m22q
|
src/werkzeug/sansio/http.py
|
@@ -126,10 +126,6 @@ def parse_cookie(
def _parse_pairs() -> t.Iterator[t.Tuple[str, str]]:
for key, val in _cookie_parse_impl(cookie): # type: ignore
key_str = _to_str(key, charset, errors, allow_none_charset=True)
-
- if not key_str:
- continue
-
val_str = _to_str(val, charset, errors, allow_none_charset=True)
yield key_str, val_str
|
import re
import typing as t
from datetime import datetime
from .._internal import _cookie_parse_impl
from .._internal import _dt_as_utc
from .._internal import _to_str
from ..http import generate_etag
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
from ..http import unquote_etag
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
def is_resource_modified(
http_range: t.Optional[str] = None,
http_if_range: t.Optional[str] = None,
http_if_modified_since: t.Optional[str] = None,
http_if_none_match: t.Optional[str] = None,
http_if_match: t.Optional[str] = None,
etag: t.Optional[str] = None,
data: t.Optional[bytes] = None,
last_modified: t.Optional[t.Union[datetime, str]] = None,
ignore_if_range: bool = True,
) -> bool:
"""Convenience method for conditional requests.
:param http_range: Range HTTP header
:param http_if_range: If-Range HTTP header
:param http_if_modified_since: If-Modified-Since HTTP header
:param http_if_none_match: If-None-Match HTTP header
:param http_if_match: If-Match HTTP header
:param etag: the etag for the response for comparison.
:param data: or alternatively the data of the response to automatically
generate an etag using :func:`generate_etag`.
:param last_modified: an optional date of the last modification.
:param ignore_if_range: If `False`, `If-Range` header will be taken into
account.
:return: `True` if the resource was modified, otherwise `False`.
.. versionadded:: 2.2
"""
if etag is None and data is not None:
etag = generate_etag(data)
elif data is not None:
raise TypeError("both data and etag given")
unmodified = False
if isinstance(last_modified, str):
last_modified = parse_date(last_modified)
# HTTP doesn't use microsecond, remove it to avoid false positive
# comparisons. Mark naive datetimes as UTC.
if last_modified is not None:
last_modified = _dt_as_utc(last_modified.replace(microsecond=0))
if_range = None
if not ignore_if_range and http_range is not None:
# https://tools.ietf.org/html/rfc7233#section-3.2
# A server MUST ignore an If-Range header field received in a request
# that does not contain a Range header field.
if_range = parse_if_range_header(http_if_range)
if if_range is not None and if_range.date is not None:
modified_since: t.Optional[datetime] = if_range.date
else:
modified_since = parse_date(http_if_modified_since)
if modified_since and last_modified and last_modified <= modified_since:
unmodified = True
if etag:
etag, _ = unquote_etag(etag)
etag = t.cast(str, etag)
if if_range is not None and if_range.etag is not None:
unmodified = parse_etags(if_range.etag).contains(etag)
else:
if_none_match = parse_etags(http_if_none_match)
if if_none_match:
# https://tools.ietf.org/html/rfc7232#section-3.2
# "A recipient MUST use the weak comparison function when comparing
# entity-tags for If-None-Match"
unmodified = if_none_match.contains_weak(etag)
# https://tools.ietf.org/html/rfc7232#section-3.1
# "Origin server MUST use the strong comparison function when
# comparing entity-tags for If-Match"
if_match = parse_etags(http_if_match)
if if_match:
unmodified = not if_match.is_strong(etag)
return not unmodified
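# Illustrative sketch (not part of the original module), mirroring the
# docstring above -- a matching If-None-Match etag means "not modified":
#
#     etag = generate_etag(b"awesome")
#     is_resource_modified(http_if_none_match=etag, data=b"awesome")   # -> False
#     is_resource_modified(http_if_none_match=etag, data=b"changed")   # -> True
#     is_resource_modified(http_if_modified_since="Sat, 01 Jan 2022 00:00:00 GMT",
#                          last_modified=datetime(2021, 12, 31))       # -> False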
def parse_cookie(
cookie: t.Union[bytes, str, None] = "",
charset: str = "utf-8",
errors: str = "replace",
cls: t.Optional[t.Type["ds.MultiDict"]] = None,
) -> "ds.MultiDict[str, str]":
"""Parse a cookie from a string.
The same key can be provided multiple times, the values are stored
in-order. The default :class:`MultiDict` will have the first value
first, and all values can be retrieved with
:meth:`MultiDict.getlist`.
:param cookie: The cookie header as a string.
:param charset: The charset for the cookie values.
:param errors: The error behavior for the charset decoding.
:param cls: A dict-like class to store the parsed cookies in.
Defaults to :class:`MultiDict`.
.. versionadded:: 2.2
"""
# PEP 3333 sends headers through the environ as latin1 decoded
# strings. Encode strings back to bytes for parsing.
if isinstance(cookie, str):
cookie = cookie.encode("latin1", "replace")
if cls is None:
cls = ds.MultiDict
def _parse_pairs() -> t.Iterator[t.Tuple[str, str]]:
for key, val in _cookie_parse_impl(cookie): # type: ignore
key_str = _to_str(key, charset, errors, allow_none_charset=True)
if not key_str:
continue
val_str = _to_str(val, charset, errors, allow_none_charset=True)
yield key_str, val_str
return cls(_parse_pairs())
# circular dependencies
from .. import datastructures as ds
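# Illustrative sketch (not part of the original module) of the in-order,
# multi-value behaviour described in the parse_cookie docstring, plus the
# empty-key filtering exercised by the test patch above:
#
#     cookies = parse_cookie("a=1; a=2; b=3")
#     cookies["a"]            # -> "1"  (first value wins for item access)
#     cookies.getlist("a")    # -> ["1", "2"]
#     parse_cookie("==__Host-eq=bad;__Host-eq=good").to_dict()
#     # -> {"__Host-eq": "good"}   (pairs with an empty key are skipped)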
|
GHSA-px8h-6qxv-m22q
|
tests/test_http.py
|
@@ -412,7 +412,8 @@ def test_is_resource_modified_for_range_requests(self):
def test_parse_cookie(self):
cookies = http.parse_cookie(
"dismiss-top=6; CP=null*; PHPSESSID=0a539d42abc001cdc762809248d4beed;"
- 'a=42; b="\\";"; ; fo234{=bar;blub=Blah; "__Secure-c"=d'
+ 'a=42; b="\\";"; ; fo234{=bar;blub=Blah; "__Secure-c"=d;'
+ "==__Host-eq=bad;__Host-eq=good;"
)
assert cookies.to_dict() == {
"CP": "null*",
@@ -423,6 +424,7 @@ def test_parse_cookie(self):
"fo234{": "bar",
"blub": "Blah",
'"__Secure-c"': "d",
+ "__Host-eq": "good",
}
def test_dump_cookie(self):
|
import base64
from datetime import date
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import pytest
from werkzeug import datastructures
from werkzeug import http
from werkzeug._internal import _wsgi_encoding_dance
from werkzeug.test import create_environ
class TestHTTPUtility:
def test_accept(self):
a = http.parse_accept_header("en-us,ru;q=0.5")
assert list(a.values()) == ["en-us", "ru"]
assert a.best == "en-us"
assert a.find("ru") == 1
pytest.raises(ValueError, a.index, "de")
assert a.to_header() == "en-us,ru;q=0.5"
def test_mime_accept(self):
a = http.parse_accept_header(
"text/xml,application/xml,"
"application/xhtml+xml,"
"application/foo;quiet=no; bar=baz;q=0.6,"
"text/html;q=0.9,text/plain;q=0.8,"
"image/png,*/*;q=0.5",
datastructures.MIMEAccept,
)
pytest.raises(ValueError, lambda: a["missing"])
assert a["image/png"] == 1
assert a["text/plain"] == 0.8
assert a["foo/bar"] == 0.5
assert a["application/foo;quiet=no; bar=baz"] == 0.6
assert a[a.find("foo/bar")] == ("*/*", 0.5)
def test_accept_matches(self):
a = http.parse_accept_header(
"text/xml,application/xml,application/xhtml+xml,"
"text/html;q=0.9,text/plain;q=0.8,"
"image/png",
datastructures.MIMEAccept,
)
assert (
a.best_match(["text/html", "application/xhtml+xml"])
== "application/xhtml+xml"
)
assert a.best_match(["text/html"]) == "text/html"
assert a.best_match(["foo/bar"]) is None
assert a.best_match(["foo/bar", "bar/foo"], default="foo/bar") == "foo/bar"
assert a.best_match(["application/xml", "text/xml"]) == "application/xml"
def test_accept_mime_specificity(self):
a = http.parse_accept_header(
"text/*, text/html, text/html;level=1, */*", datastructures.MIMEAccept
)
assert a.best_match(["text/html; version=1", "text/html"]) == "text/html"
assert a.best_match(["text/html", "text/html; level=1"]) == "text/html; level=1"
def test_charset_accept(self):
a = http.parse_accept_header(
"ISO-8859-1,utf-8;q=0.7,*;q=0.7", datastructures.CharsetAccept
)
assert a["iso-8859-1"] == a["iso8859-1"]
assert a["iso-8859-1"] == 1
assert a["UTF8"] == 0.7
assert a["ebcdic"] == 0.7
def test_language_accept(self):
a = http.parse_accept_header(
"de-AT,de;q=0.8,en;q=0.5", datastructures.LanguageAccept
)
assert a.best == "de-AT"
assert "de_AT" in a
assert "en" in a
assert a["de-at"] == 1
assert a["en"] == 0.5
def test_set_header(self):
hs = http.parse_set_header('foo, Bar, "Blah baz", Hehe')
assert "blah baz" in hs
assert "foobar" not in hs
assert "foo" in hs
assert list(hs) == ["foo", "Bar", "Blah baz", "Hehe"]
hs.add("Foo")
assert hs.to_header() == 'foo, Bar, "Blah baz", Hehe'
def test_list_header(self):
hl = http.parse_list_header("foo baz, blah")
assert hl == ["foo baz", "blah"]
def test_dict_header(self):
d = http.parse_dict_header('foo="bar baz", blah=42')
assert d == {"foo": "bar baz", "blah": "42"}
def test_cache_control_header(self):
cc = http.parse_cache_control_header("max-age=0, no-cache")
assert cc.max_age == 0
assert cc.no_cache
cc = http.parse_cache_control_header(
'private, community="UCI"', None, datastructures.ResponseCacheControl
)
assert cc.private
assert cc["community"] == "UCI"
c = datastructures.ResponseCacheControl()
assert c.no_cache is None
assert c.private is None
c.no_cache = True
assert c.no_cache == "*"
c.private = True
assert c.private == "*"
del c.private
assert c.private is None
# max_age is an int, other types are converted
c.max_age = 3.1
assert c.max_age == 3
del c.max_age
c.s_maxage = 3.1
assert c.s_maxage == 3
del c.s_maxage
assert c.to_header() == "no-cache"
def test_csp_header(self):
csp = http.parse_csp_header(
"default-src 'self'; script-src 'unsafe-inline' *; img-src"
)
assert csp.default_src == "'self'"
assert csp.script_src == "'unsafe-inline' *"
assert csp.img_src is None
def test_authorization_header(self):
a = http.parse_authorization_header("Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
assert a.type == "basic"
assert a.username == "Aladdin"
assert a.password == "open sesame"
a = http.parse_authorization_header(
"Basic 0YDRg9GB0YHQutC40IE60JHRg9C60LLRiw=="
)
assert a.type == "basic"
assert a.username == "русскиЁ"
assert a.password == "Буквы"
a = http.parse_authorization_header("Basic 5pmu6YCa6K+dOuS4reaWhw==")
assert a.type == "basic"
assert a.username == "普通话"
assert a.password == "中文"
a = http.parse_authorization_header(
'''Digest username="Mufasa",
realm="[email protected]",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
qop=auth,
nc=00000001,
cnonce="0a4f113b",
response="6629fae49393a05397450978507c4ef1",
opaque="5ccc069c403ebaf9f0171e9517f40e41"'''
)
assert a.type == "digest"
assert a.username == "Mufasa"
assert a.realm == "[email protected]"
assert a.nonce == "dcd98b7102dd2f0e8b11d0f600bfb0c093"
assert a.uri == "/dir/index.html"
assert a.qop == "auth"
assert a.nc == "00000001"
assert a.cnonce == "0a4f113b"
assert a.response == "6629fae49393a05397450978507c4ef1"
assert a.opaque == "5ccc069c403ebaf9f0171e9517f40e41"
a = http.parse_authorization_header(
'''Digest username="Mufasa",
realm="[email protected]",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
response="e257afa1414a3340d93d30955171dd0e",
opaque="5ccc069c403ebaf9f0171e9517f40e41"'''
)
assert a.type == "digest"
assert a.username == "Mufasa"
assert a.realm == "[email protected]"
assert a.nonce == "dcd98b7102dd2f0e8b11d0f600bfb0c093"
assert a.uri == "/dir/index.html"
assert a.response == "e257afa1414a3340d93d30955171dd0e"
assert a.opaque == "5ccc069c403ebaf9f0171e9517f40e41"
assert http.parse_authorization_header("") is None
assert http.parse_authorization_header(None) is None
assert http.parse_authorization_header("foo") is None
def test_bad_authorization_header_encoding(self):
"""If the base64 encoded bytes can't be decoded as UTF-8"""
content = base64.b64encode(b"\xffser:pass").decode()
assert http.parse_authorization_header(f"Basic {content}") is None
def test_www_authenticate_header(self):
wa = http.parse_www_authenticate_header('Basic realm="WallyWorld"')
assert wa.type == "basic"
assert wa.realm == "WallyWorld"
wa.realm = "Foo Bar"
assert wa.to_header() == 'Basic realm="Foo Bar"'
wa = http.parse_www_authenticate_header(
'''Digest
realm="[email protected]",
qop="auth,auth-int",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
opaque="5ccc069c403ebaf9f0171e9517f40e41"'''
)
assert wa.type == "digest"
assert wa.realm == "[email protected]"
assert "auth" in wa.qop
assert "auth-int" in wa.qop
assert wa.nonce == "dcd98b7102dd2f0e8b11d0f600bfb0c093"
assert wa.opaque == "5ccc069c403ebaf9f0171e9517f40e41"
wa = http.parse_www_authenticate_header("broken")
assert wa.type == "broken"
assert not http.parse_www_authenticate_header("").type
assert not http.parse_www_authenticate_header("")
def test_etags(self):
assert http.quote_etag("foo") == '"foo"'
assert http.quote_etag("foo", True) == 'W/"foo"'
assert http.unquote_etag('"foo"') == ("foo", False)
assert http.unquote_etag('W/"foo"') == ("foo", True)
es = http.parse_etags('"foo", "bar", W/"baz", blar')
assert sorted(es) == ["bar", "blar", "foo"]
assert "foo" in es
assert "baz" not in es
assert es.contains_weak("baz")
assert "blar" in es
assert es.contains_raw('W/"baz"')
assert es.contains_raw('"foo"')
assert sorted(es.to_header().split(", ")) == [
'"bar"',
'"blar"',
'"foo"',
'W/"baz"',
]
def test_etags_nonzero(self):
etags = http.parse_etags('W/"foo"')
assert bool(etags)
assert etags.contains_raw('W/"foo"')
def test_remove_entity_headers(self):
now = http.http_date()
headers1 = [
("Date", now),
("Content-Type", "text/html"),
("Content-Length", "0"),
]
headers2 = datastructures.Headers(headers1)
http.remove_entity_headers(headers1)
assert headers1 == [("Date", now)]
http.remove_entity_headers(headers2)
assert headers2 == datastructures.Headers([("Date", now)])
def test_remove_hop_by_hop_headers(self):
headers1 = [("Connection", "closed"), ("Foo", "bar"), ("Keep-Alive", "wtf")]
headers2 = datastructures.Headers(headers1)
http.remove_hop_by_hop_headers(headers1)
assert headers1 == [("Foo", "bar")]
http.remove_hop_by_hop_headers(headers2)
assert headers2 == datastructures.Headers([("Foo", "bar")])
def test_parse_options_header(self):
assert http.parse_options_header(None) == ("", {})
assert http.parse_options_header("") == ("", {})
assert http.parse_options_header(r'something; foo="other\"thing"') == (
"something",
{"foo": 'other"thing'},
)
assert http.parse_options_header(r'something; foo="other\"thing"; meh=42') == (
"something",
{"foo": 'other"thing', "meh": "42"},
)
assert http.parse_options_header(
r'something; foo="other\"thing"; meh=42; bleh'
) == ("something", {"foo": 'other"thing', "meh": "42", "bleh": None})
assert http.parse_options_header(
'something; foo="other;thing"; meh=42; bleh'
) == ("something", {"foo": "other;thing", "meh": "42", "bleh": None})
assert http.parse_options_header('something; foo="otherthing"; meh=; bleh') == (
"something",
{"foo": "otherthing", "meh": None, "bleh": None},
)
# Issue #404
assert http.parse_options_header(
'multipart/form-data; name="foo bar"; filename="bar foo"'
) == ("multipart/form-data", {"name": "foo bar", "filename": "bar foo"})
# Examples from RFC
assert http.parse_options_header("audio/*; q=0.2, audio/basic") == (
"audio/*",
{"q": "0.2"},
)
assert http.parse_options_header(
"text/plain; q=0.5, text/html\n text/x-dvi; q=0.8, text/x-c"
) == ("text/plain", {"q": "0.5"})
# Issue #932
assert http.parse_options_header(
"form-data; name=\"a_file\"; filename*=UTF-8''"
'"%c2%a3%20and%20%e2%82%ac%20rates"'
) == ("form-data", {"name": "a_file", "filename": "\xa3 and \u20ac rates"})
assert http.parse_options_header(
"form-data; name*=UTF-8''\"%C5%AAn%C4%ADc%C5%8Dde%CC%BD\"; "
'filename="some_file.txt"'
) == (
"form-data",
{"name": "\u016an\u012dc\u014dde\u033d", "filename": "some_file.txt"},
)
def test_parse_options_header_value_with_quotes(self):
assert http.parse_options_header(
'form-data; name="file"; filename="t\'es\'t.txt"'
) == ("form-data", {"name": "file", "filename": "t'es't.txt"})
assert http.parse_options_header(
"form-data; name=\"file\"; filename*=UTF-8''\"'🐍'.txt\""
) == ("form-data", {"name": "file", "filename": "'🐍'.txt"})
def test_parse_options_header_broken_values(self):
# Issue #995
assert http.parse_options_header(" ") == ("", {})
assert http.parse_options_header(" , ") == ("", {})
assert http.parse_options_header(" ; ") == ("", {})
assert http.parse_options_header(" ,; ") == ("", {})
assert http.parse_options_header(" , a ") == ("", {})
assert http.parse_options_header(" ; a ") == ("", {})
def test_parse_options_header_case_insensitive(self):
_, options = http.parse_options_header(r'something; fileName="File.ext"')
assert options["filename"] == "File.ext"
def test_dump_options_header(self):
assert http.dump_options_header("foo", {"bar": 42}) == "foo; bar=42"
assert http.dump_options_header("foo", {"bar": 42, "fizz": None}) in (
"foo; bar=42; fizz",
"foo; fizz; bar=42",
)
def test_dump_header(self):
assert http.dump_header([1, 2, 3]) == "1, 2, 3"
assert http.dump_header([1, 2, 3], allow_token=False) == '"1", "2", "3"'
assert http.dump_header({"foo": "bar"}, allow_token=False) == 'foo="bar"'
assert http.dump_header({"foo": "bar"}) == "foo=bar"
assert http.dump_header({"foo*": "UTF-8''bar"}) == "foo*=UTF-8''bar"
def test_is_resource_modified(self):
env = create_environ()
# any method is allowed
env["REQUEST_METHOD"] = "POST"
assert http.is_resource_modified(env, etag="testing")
env["REQUEST_METHOD"] = "GET"
# etagify from data
pytest.raises(TypeError, http.is_resource_modified, env, data="42", etag="23")
env["HTTP_IF_NONE_MATCH"] = http.generate_etag(b"awesome")
assert not http.is_resource_modified(env, data=b"awesome")
env["HTTP_IF_MODIFIED_SINCE"] = http.http_date(datetime(2008, 1, 1, 12, 30))
assert not http.is_resource_modified(
env, last_modified=datetime(2008, 1, 1, 12, 00)
)
assert http.is_resource_modified(
env, last_modified=datetime(2008, 1, 1, 13, 00)
)
def test_is_resource_modified_for_range_requests(self):
env = create_environ()
env["HTTP_IF_MODIFIED_SINCE"] = http.http_date(datetime(2008, 1, 1, 12, 30))
env["HTTP_IF_RANGE"] = http.generate_etag(b"awesome_if_range")
# Range header not present, so If-Range should be ignored
assert not http.is_resource_modified(
env,
data=b"not_the_same",
ignore_if_range=False,
last_modified=datetime(2008, 1, 1, 12, 30),
)
env["HTTP_RANGE"] = ""
assert not http.is_resource_modified(
env, data=b"awesome_if_range", ignore_if_range=False
)
assert http.is_resource_modified(
env, data=b"not_the_same", ignore_if_range=False
)
env["HTTP_IF_RANGE"] = http.http_date(datetime(2008, 1, 1, 13, 30))
assert http.is_resource_modified(
env, last_modified=datetime(2008, 1, 1, 14, 00), ignore_if_range=False
)
assert not http.is_resource_modified(
env, last_modified=datetime(2008, 1, 1, 13, 30), ignore_if_range=False
)
assert http.is_resource_modified(
env, last_modified=datetime(2008, 1, 1, 13, 30), ignore_if_range=True
)
def test_parse_cookie(self):
cookies = http.parse_cookie(
"dismiss-top=6; CP=null*; PHPSESSID=0a539d42abc001cdc762809248d4beed;"
'a=42; b="\\";"; ; fo234{=bar;blub=Blah; "__Secure-c"=d'
)
assert cookies.to_dict() == {
"CP": "null*",
"PHPSESSID": "0a539d42abc001cdc762809248d4beed",
"a": "42",
"dismiss-top": "6",
"b": '";',
"fo234{": "bar",
"blub": "Blah",
'"__Secure-c"': "d",
}
def test_dump_cookie(self):
rv = http.dump_cookie(
"foo", "bar baz blub", 360, httponly=True, sync_expires=False
)
assert set(rv.split("; ")) == {
"HttpOnly",
"Max-Age=360",
"Path=/",
'foo="bar baz blub"',
}
assert http.dump_cookie("key", "xxx/") == "key=xxx/; Path=/"
assert http.dump_cookie("key", "xxx=") == "key=xxx=; Path=/"
def test_bad_cookies(self):
cookies = http.parse_cookie(
"first=IamTheFirst ; a=1; oops ; a=2 ;second = andMeTwo;"
)
expect = {
"first": ["IamTheFirst"],
"a": ["1", "2"],
"oops": [""],
"second": ["andMeTwo"],
}
assert cookies.to_dict(flat=False) == expect
assert cookies["a"] == "1"
assert cookies.getlist("a") == ["1", "2"]
def test_empty_keys_are_ignored(self):
cookies = http.parse_cookie("spam=ham; duck=mallard; ; ")
expect = {"spam": "ham", "duck": "mallard"}
assert cookies.to_dict() == expect
def test_cookie_quoting(self):
val = http.dump_cookie("foo", "?foo")
assert val == 'foo="?foo"; Path=/'
assert http.parse_cookie(val).to_dict() == {"foo": "?foo", "Path": "/"}
assert http.parse_cookie(r'foo="foo\054bar"').to_dict() == {"foo": "foo,bar"}
def test_parse_set_cookie_directive(self):
val = 'foo="?foo"; version="0.1";'
assert http.parse_cookie(val).to_dict() == {"foo": "?foo", "version": "0.1"}
def test_cookie_domain_resolving(self):
val = http.dump_cookie("foo", "bar", domain="\N{SNOWMAN}.com")
assert val == "foo=bar; Domain=xn--n3h.com; Path=/"
def test_cookie_unicode_dumping(self):
val = http.dump_cookie("foo", "\N{SNOWMAN}")
h = datastructures.Headers()
h.add("Set-Cookie", val)
assert h["Set-Cookie"] == 'foo="\\342\\230\\203"; Path=/'
cookies = http.parse_cookie(h["Set-Cookie"])
assert cookies["foo"] == "\N{SNOWMAN}"
def test_cookie_unicode_keys(self):
# Yes, this is technically against the spec but happens
val = http.dump_cookie("fö", "fö")
assert val == _wsgi_encoding_dance('fö="f\\303\\266"; Path=/', "utf-8")
cookies = http.parse_cookie(val)
assert cookies["fö"] == "fö"
def test_cookie_unicode_parsing(self):
# This is submitted by Firefox if you set a Unicode cookie.
cookies = http.parse_cookie("fö=fö")
assert cookies["fö"] == "fö"
def test_cookie_domain_encoding(self):
val = http.dump_cookie("foo", "bar", domain="\N{SNOWMAN}.com")
assert val == "foo=bar; Domain=xn--n3h.com; Path=/"
val = http.dump_cookie("foo", "bar", domain=".\N{SNOWMAN}.com")
assert val == "foo=bar; Domain=.xn--n3h.com; Path=/"
val = http.dump_cookie("foo", "bar", domain=".foo.com")
assert val == "foo=bar; Domain=.foo.com; Path=/"
def test_cookie_maxsize(self, recwarn):
val = http.dump_cookie("foo", "bar" * 1360 + "b")
assert len(recwarn) == 0
assert len(val) == 4093
http.dump_cookie("foo", "bar" * 1360 + "ba")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
http.dump_cookie("foo", b"w" * 502, max_size=512)
assert len(recwarn) == 1
w = recwarn.pop()
assert "the limit is 512 bytes" in str(w.message)
@pytest.mark.parametrize(
("samesite", "expected"),
(
("strict", "foo=bar; Path=/; SameSite=Strict"),
("lax", "foo=bar; Path=/; SameSite=Lax"),
("none", "foo=bar; Path=/; SameSite=None"),
(None, "foo=bar; Path=/"),
),
)
def test_cookie_samesite_attribute(self, samesite, expected):
value = http.dump_cookie("foo", "bar", samesite=samesite)
assert value == expected
def test_cookie_samesite_invalid(self):
with pytest.raises(ValueError):
http.dump_cookie("foo", "bar", samesite="invalid")
class TestRange:
def test_if_range_parsing(self):
rv = http.parse_if_range_header('"Test"')
assert rv.etag == "Test"
assert rv.date is None
assert rv.to_header() == '"Test"'
# weak information is dropped
rv = http.parse_if_range_header('W/"Test"')
assert rv.etag == "Test"
assert rv.date is None
assert rv.to_header() == '"Test"'
# broken etags are supported too
rv = http.parse_if_range_header("bullshit")
assert rv.etag == "bullshit"
assert rv.date is None
assert rv.to_header() == '"bullshit"'
rv = http.parse_if_range_header("Thu, 01 Jan 1970 00:00:00 GMT")
assert rv.etag is None
assert rv.date == datetime(1970, 1, 1, tzinfo=timezone.utc)
assert rv.to_header() == "Thu, 01 Jan 1970 00:00:00 GMT"
for x in "", None:
rv = http.parse_if_range_header(x)
assert rv.etag is None
assert rv.date is None
assert rv.to_header() == ""
def test_range_parsing(self):
rv = http.parse_range_header("bytes=52")
assert rv is None
rv = http.parse_range_header("bytes=52-")
assert rv.units == "bytes"
assert rv.ranges == [(52, None)]
assert rv.to_header() == "bytes=52-"
rv = http.parse_range_header("bytes=52-99")
assert rv.units == "bytes"
assert rv.ranges == [(52, 100)]
assert rv.to_header() == "bytes=52-99"
rv = http.parse_range_header("bytes=52-99,-1000")
assert rv.units == "bytes"
assert rv.ranges == [(52, 100), (-1000, None)]
assert rv.to_header() == "bytes=52-99,-1000"
rv = http.parse_range_header("bytes = 1 - 100")
assert rv.units == "bytes"
assert rv.ranges == [(1, 101)]
assert rv.to_header() == "bytes=1-100"
rv = http.parse_range_header("AWesomes=0-999")
assert rv.units == "awesomes"
assert rv.ranges == [(0, 1000)]
assert rv.to_header() == "awesomes=0-999"
rv = http.parse_range_header("bytes=-")
assert rv is None
rv = http.parse_range_header("bytes=bad")
assert rv is None
rv = http.parse_range_header("bytes=bad-1")
assert rv is None
rv = http.parse_range_header("bytes=-bad")
assert rv is None
rv = http.parse_range_header("bytes=52-99, bad")
assert rv is None
def test_content_range_parsing(self):
rv = http.parse_content_range_header("bytes 0-98/*")
assert rv.units == "bytes"
assert rv.start == 0
assert rv.stop == 99
assert rv.length is None
assert rv.to_header() == "bytes 0-98/*"
rv = http.parse_content_range_header("bytes 0-98/*asdfsa")
assert rv is None
rv = http.parse_content_range_header("bytes */-1")
assert rv is None
rv = http.parse_content_range_header("bytes 0-99/100")
assert rv.to_header() == "bytes 0-99/100"
rv.start = None
rv.stop = None
assert rv.units == "bytes"
assert rv.to_header() == "bytes */100"
rv = http.parse_content_range_header("bytes */100")
assert rv.start is None
assert rv.stop is None
assert rv.length == 100
assert rv.units == "bytes"
class TestRegression:
def test_best_match_works(self):
# was a bug in 0.6
rv = http.parse_accept_header(
"foo=,application/xml,application/xhtml+xml,"
"text/html;q=0.9,text/plain;q=0.8,"
"image/png,*/*;q=0.5",
datastructures.MIMEAccept,
).best_match(["foo/bar"])
assert rv == "foo/bar"
@pytest.mark.parametrize(
"value",
[
"Basic V2Vya3pldWc6V2VrcnpldWc=",
'Digest username=Mufasa, realm="[email protected]",'
' nonce=dcd98b7102dd2f0e8b11d0f600bfb0c093, uri="/dir/index.html", qop=auth,'
" nc=00000001, cnonce=0a4f113b, response=6629fae49393a05397450978507c4ef1,"
" opaque=5ccc069c403ebaf9f0171e9517f40e41",
],
)
def test_authorization_to_header(value: str) -> None:
parsed = http.parse_authorization_header(value)
assert parsed is not None
assert parsed.to_header() == value
@pytest.mark.parametrize(
("value", "expect"),
[
(
"Sun, 06 Nov 1994 08:49:37 GMT ",
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
),
(
"Sunday, 06-Nov-94 08:49:37 GMT",
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
),
(
" Sun Nov 6 08:49:37 1994",
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
),
("foo", None),
(
" Sun 02 Feb 1343 08:49:37 GMT",
datetime(1343, 2, 2, 8, 49, 37, tzinfo=timezone.utc),
),
(
"Thu, 01 Jan 1970 00:00:00 GMT",
datetime(1970, 1, 1, tzinfo=timezone.utc),
),
("Thu, 33 Jan 1970 00:00:00 GMT", None),
],
)
def test_parse_date(value, expect):
assert http.parse_date(value) == expect
@pytest.mark.parametrize(
("value", "expect"),
[
(
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
"Sun, 06 Nov 1994 08:49:37 GMT",
),
(
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone(timedelta(hours=-8))),
"Sun, 06 Nov 1994 16:49:37 GMT",
),
(datetime(1994, 11, 6, 8, 49, 37), "Sun, 06 Nov 1994 08:49:37 GMT"),
(0, "Thu, 01 Jan 1970 00:00:00 GMT"),
(datetime(1970, 1, 1), "Thu, 01 Jan 1970 00:00:00 GMT"),
(datetime(1, 1, 1), "Mon, 01 Jan 0001 00:00:00 GMT"),
(datetime(999, 1, 1), "Tue, 01 Jan 0999 00:00:00 GMT"),
(datetime(1000, 1, 1), "Wed, 01 Jan 1000 00:00:00 GMT"),
(datetime(2020, 1, 1), "Wed, 01 Jan 2020 00:00:00 GMT"),
(date(2020, 1, 1), "Wed, 01 Jan 2020 00:00:00 GMT"),
],
)
def test_http_date(value, expect):
assert http.http_date(value) == expect
|
GHSA-px8h-6qxv-m22q
|
tests/compiler/ir/test_optimize_ir.py
|
@@ -143,7 +143,9 @@
(["sub", "x", 0], ["x"]),
(["sub", "x", "x"], [0]),
(["sub", ["sload", 0], ["sload", 0]], None),
- (["sub", ["callvalue"], ["callvalue"]], None),
+ (["sub", ["callvalue"], ["callvalue"]], [0]),
+ (["sub", ["msize"], ["msize"]], None),
+ (["sub", ["gas"], ["gas"]], None),
(["sub", -1, ["sload", 0]], ["not", ["sload", 0]]),
(["mul", "x", 1], ["x"]),
(["div", "x", 1], ["x"]),
@@ -210,7 +212,9 @@
(["eq", -1, ["add", -(2**255), 2**255 - 1]], [1]), # test compile-time wrapping
(["eq", -2, ["add", 2**256 - 1, 2**256 - 1]], [1]), # test compile-time wrapping
(["eq", "x", "x"], [1]),
- (["eq", "callvalue", "callvalue"], None),
+ (["eq", "gas", "gas"], None),
+ (["eq", "msize", "msize"], None),
+ (["eq", "callvalue", "callvalue"], [1]),
(["ne", "x", "x"], [0]),
]
|
import pytest
from vyper.codegen.ir_node import IRnode
from vyper.exceptions import StaticAssertionException
from vyper.ir import optimizer
optimize_list = [
(["eq", 1, 2], [0]),
(["lt", 1, 2], [1]),
(["eq", "x", 0], ["iszero", "x"]),
(["ne", "x", 0], ["iszero", ["iszero", "x"]]),
(["ne", "x", 1], None),
(["iszero", ["ne", "x", 1]], ["iszero", ["iszero", ["iszero", ["xor", "x", 1]]]]),
(["eq", ["sload", 0], 0], ["iszero", ["sload", 0]]),
# branch pruner
(["if", ["eq", 1, 2], "pass"], ["seq"]),
(["if", ["eq", 1, 1], 3, 4], [3]),
(["if", ["eq", 1, 2], 3, 4], [4]),
(["seq", ["assert", ["lt", 1, 2]]], ["seq"]),
(["seq", ["assert", ["lt", 1, 2]], 2], [2]),
# condition rewriter
(["if", ["eq", "x", "y"], "pass"], ["if", ["iszero", ["xor", "x", "y"]], "pass"]),
(["if", "cond", 1, 0], ["if", ["iszero", "cond"], 0, 1]),
(["if", ["ne", "x", 1], [1]], None),
(
# TODO: this is a perf issue (codegen should usually generate `if (ne x y)` though)
["if", ["iszero", ["eq", "x", "y"]], [1]],
["if", ["iszero", ["iszero", ["xor", "x", "y"]]], 1],
),
(["assert", ["eq", "x", "y"]], ["assert", ["iszero", ["xor", "x", "y"]]]),
(["assert", ["ne", "x", "y"]], None),
# nesting
(["mstore", 0, ["eq", 1, 2]], ["mstore", 0, 0]),
# conditions
(["ge", "x", 0], [1]), # x >= 0 == True
(["ge", ["sload", 0], 0], None), # no-op
(["gt", "x", 2**256 - 1], [0]), # x >= MAX_UINT256 == False
# !(x > 0) => x == 0
(["iszero", ["gt", "x", 0]], ["iszero", ["iszero", ["iszero", "x"]]]),
# !(x < MAX_UINT256) => x == MAX_UINT256
(["iszero", ["lt", "x", 2**256 - 1]], ["iszero", ["iszero", ["iszero", ["not", "x"]]]]),
# !(x < MAX_INT256) => x == MAX_INT256
(
["iszero", ["slt", "x", 2**255 - 1]],
["iszero", ["iszero", ["iszero", ["xor", "x", 2**255 - 1]]]],
),
# !(x > MIN_INT256) => x == MIN_INT256
(
["iszero", ["sgt", "x", -(2**255)]],
["iszero", ["iszero", ["iszero", ["xor", "x", -(2**255)]]]],
),
(["sgt", "x", 2**255 - 1], [0]), # signed x > MAX_INT256 == False
(["sge", "x", 2**255 - 1], ["eq", "x", 2**255 - 1]),
(["eq", -1, "x"], ["iszero", ["not", "x"]]),
(["iszero", ["eq", -1, "x"]], ["iszero", ["iszero", ["not", "x"]]]),
(["le", "x", 0], ["iszero", "x"]),
(["le", 0, "x"], [1]),
(["le", 0, ["sload", 0]], None), # no-op
(["ge", "x", 0], [1]),
(["le", "x", "x"], [1]),
(["ge", "x", "x"], [1]),
(["sle", "x", "x"], [1]),
(["sge", "x", "x"], [1]),
(["lt", "x", "x"], [0]),
(["gt", "x", "x"], [0]),
(["slt", "x", "x"], [0]),
(["sgt", "x", "x"], [0]),
# boundary conditions
(["slt", "x", -(2**255)], [0]),
(["sle", "x", -(2**255)], ["eq", "x", -(2**255)]),
(["lt", "x", 2**256 - 1], None),
(["le", "x", 2**256 - 1], [1]),
(["gt", "x", 0], ["iszero", ["iszero", "x"]]),
# x < 0 => false
(["lt", "x", 0], [0]),
# 0 < x => x != 0
(["lt", 0, "x"], ["iszero", ["iszero", "x"]]),
(["gt", 5, "x"], None),
# x < 1 => x == 0
(["lt", "x", 1], ["iszero", "x"]),
(["slt", "x", 1], None),
(["gt", "x", 1], None),
(["sgt", "x", 1], None),
(["gt", "x", 2**256 - 2], ["iszero", ["not", "x"]]),
(["lt", "x", 2**256 - 2], None),
(["slt", "x", 2**256 - 2], None),
(["sgt", "x", 2**256 - 2], None),
(["slt", "x", -(2**255) + 1], ["eq", "x", -(2**255)]),
(["sgt", "x", -(2**255) + 1], None),
(["lt", "x", -(2**255) + 1], None),
(["gt", "x", -(2**255) + 1], None),
(["sgt", "x", 2**255 - 2], ["eq", "x", 2**255 - 1]),
(["slt", "x", 2**255 - 2], None),
(["gt", "x", 2**255 - 2], None),
(["lt", "x", 2**255 - 2], None),
# 5 > x; x < 5; x <= 4
(["iszero", ["gt", 5, "x"]], ["iszero", ["le", "x", 4]]),
(["iszero", ["ge", 5, "x"]], None),
# 5 >= x; x <= 5; x < 6
(["ge", 5, "x"], ["lt", "x", 6]),
(["lt", 5, "x"], None),
# 5 < x; x > 5; x >= 6
(["iszero", ["lt", 5, "x"]], ["iszero", ["ge", "x", 6]]),
(["iszero", ["le", 5, "x"]], None),
# 5 <= x; x >= 5; x > 4
(["le", 5, "x"], ["gt", "x", 4]),
(["sgt", 5, "x"], None),
# 5 > x; x < 5; x <= 4
(["iszero", ["sgt", 5, "x"]], ["iszero", ["sle", "x", 4]]),
(["iszero", ["sge", 5, "x"]], None),
# 5 >= x; x <= 5; x < 6
(["sge", 5, "x"], ["slt", "x", 6]),
(["slt", 5, "x"], None),
# 5 < x; x > 5; x >= 6
(["iszero", ["slt", 5, "x"]], ["iszero", ["sge", "x", 6]]),
(["iszero", ["sle", 5, "x"]], None),
# 5 <= x; x >= 5; x > 4
(["sle", 5, "x"], ["sgt", "x", 4]),
# tricky constant folds
(["sgt", 2**256 - 1, 0], [0]), # -1 > 0
(["gt", 2**256 - 1, 0], [1]), # -1 > 0
(["gt", 2**255, 0], [1]), # 0x80 > 0
(["sgt", 2**255, 0], [0]), # 0x80 > 0
(["sgt", 2**255, 2**255 - 1], [0]), # 0x80 > 0x81
(["gt", -(2**255), 2**255 - 1], [1]), # 0x80 > 0x81
(["slt", 2**255, 2**255 - 1], [1]), # 0x80 < 0x7f
(["lt", -(2**255), 2**255 - 1], [0]), # 0x80 < 0x7f
(["sle", -1, 2**256 - 1], [1]), # -1 <= -1
(["sge", -(2**255), 2**255], [1]), # 0x80 >= 0x80
(["sgt", -(2**255), 2**255], [0]), # 0x80 > 0x80
(["slt", 2**255, -(2**255)], [0]), # 0x80 < 0x80
# arithmetic
(["ceil32", "x"], None),
(["ceil32", 0], [0]),
(["ceil32", 1], [32]),
(["ceil32", 32], [32]),
(["ceil32", 33], [64]),
(["ceil32", 95], [96]),
(["ceil32", 96], [96]),
(["ceil32", 97], [128]),
(["add", "x", 0], ["x"]),
(["add", 0, "x"], ["x"]),
(["sub", "x", 0], ["x"]),
(["sub", "x", "x"], [0]),
(["sub", ["sload", 0], ["sload", 0]], None),
(["sub", ["callvalue"], ["callvalue"]], None),
(["sub", -1, ["sload", 0]], ["not", ["sload", 0]]),
(["mul", "x", 1], ["x"]),
(["div", "x", 1], ["x"]),
(["sdiv", "x", 1], ["x"]),
(["mod", "x", 1], [0]),
(["mod", ["sload", 0], 1], None),
(["smod", "x", 1], [0]),
(["mul", "x", -1], ["sub", 0, "x"]),
(["sdiv", "x", -1], ["sub", 0, "x"]),
(["mul", "x", 0], [0]),
(["mul", ["sload", 0], 0], None),
(["div", "x", 0], [0]),
(["div", ["sload", 0], 0], None),
(["sdiv", "x", 0], [0]),
(["sdiv", ["sload", 0], 0], None),
(["mod", "x", 0], [0]),
(["mod", ["sload", 0], 0], None),
(["smod", "x", 0], [0]),
(["mul", "x", 32], ["shl", 5, "x"]),
(["div", "x", 64], ["shr", 6, "x"]),
(["mod", "x", 128], ["and", "x", 127]),
(["sdiv", "x", 64], None),
(["smod", "x", 64], None),
(["exp", 3, 5], [3**5]),
(["exp", 3, 256], [(3**256) % (2**256)]),
(["exp", 2, 257], [0]),
(["exp", "x", 0], [1]),
(["exp", "x", 1], ["x"]),
(["exp", 1, "x"], [1]),
(["exp", 0, "x"], ["iszero", "x"]),
# bitwise ops
(["xor", "x", 2**256 - 1], ["not", "x"]),
(["and", "x", 2**256 - 1], ["x"]),
(["or", "x", 2**256 - 1], [2**256 - 1]),
(["shr", 0, "x"], ["x"]),
(["sar", 0, "x"], ["x"]),
(["shl", 0, "x"], ["x"]),
(["shr", 256, "x"], None),
(["sar", 256, "x"], None),
(["shl", 256, "x"], None),
(["and", 1, 2], [0]),
(["or", 1, 2], [3]),
(["xor", 1, 2], [3]),
(["xor", 3, 2], [1]),
(["and", 0, "x"], [0]),
(["and", "x", 0], [0]),
(["or", "x", 0], ["x"]),
(["or", 0, "x"], ["x"]),
(["xor", "x", 0], ["x"]),
(["xor", "x", 1], None),
(["and", "x", 1], None),
(["or", "x", 1], None),
(["xor", 0, "x"], ["x"]),
(["xor", "x", "x"], [0]),
(["iszero", ["or", "x", 1]], [0]),
(["iszero", ["or", 2, "x"]], [0]),
(["iszero", ["or", 1, ["sload", 0]]], None),
# nested optimizations
(["eq", 0, ["sub", 1, 1]], [1]),
(["eq", 0, ["add", 2**255, 2**255]], [1]), # test compile-time wrapping
(["eq", 0, ["add", 2**255, -(2**255)]], [1]), # test compile-time wrapping
(["eq", -1, ["add", 0, -1]], [1]), # test compile-time wrapping
(["eq", -1, ["add", 2**255, 2**255 - 1]], [1]), # test compile-time wrapping
(["eq", -1, ["add", -(2**255), 2**255 - 1]], [1]), # test compile-time wrapping
(["eq", -2, ["add", 2**256 - 1, 2**256 - 1]], [1]), # test compile-time wrapping
(["eq", "x", "x"], [1]),
(["eq", "callvalue", "callvalue"], None),
(["ne", "x", "x"], [0]),
]
@pytest.mark.parametrize("ir", optimize_list)
def test_ir_optimizer(ir):
optimized = optimizer.optimize(IRnode.from_list(ir[0]))
optimized.repr_show_gas = True
if ir[1] is None:
# no-op, assert optimizer does nothing
expected = IRnode.from_list(ir[0])
else:
expected = IRnode.from_list(ir[1])
expected.repr_show_gas = True
optimized.annotation = None
assert optimized == expected
static_assertions_list = [
["assert", ["eq", 2, 1]],
["assert", ["ne", 1, 1]],
["assert", ["sub", 1, 1]],
["assert", ["lt", 2, 1]],
["assert", ["lt", 1, 1]],
["assert", ["lt", "x", 0]], # +x < 0
["assert", ["le", 1, 0]],
["assert", ["le", 2**256 - 1, 0]],
["assert", ["gt", 1, 2]],
["assert", ["gt", 1, 1]],
["assert", ["gt", 0, 2**256 - 1]],
["assert", ["gt", "x", 2**256 - 1]],
["assert", ["ge", 1, 2]],
["assert", ["ge", 1, 2]],
["assert", ["slt", 2, 1]],
["assert", ["slt", 1, 1]],
["assert", ["slt", 0, 2**256 - 1]], # 0 < -1
["assert", ["slt", -(2**255), 2**255]], # 0x80 < 0x80
["assert", ["sle", 0, 2**255]], # 0 < 0x80
["assert", ["sgt", 1, 2]],
["assert", ["sgt", 1, 1]],
["assert", ["sgt", 2**256 - 1, 0]], # -1 > 0
["assert", ["sgt", 2**255, -(2**255)]], # 0x80 > 0x80
["assert", ["sge", 2**255, 0]], # 0x80 > 0
]
@pytest.mark.parametrize("ir", static_assertions_list)
def test_static_assertions(ir, assert_compile_failed):
ir = IRnode.from_list(ir)
assert_compile_failed(lambda: optimizer.optimize(ir), StaticAssertionException)
def test_operator_set_values():
# some sanity checks
assert optimizer.COMPARISON_OPS == {"lt", "gt", "le", "ge", "slt", "sgt", "sle", "sge"}
assert optimizer.STRICT_COMPARISON_OPS == {"lt", "gt", "slt", "sgt"}
assert optimizer.UNSTRICT_COMPARISON_OPS == {"le", "ge", "sle", "sge"}
|
GHSA-c647-pxm2-c52w
|
tests/parser/functions/test_create_functions.py
|
@@ -431,3 +431,212 @@ def test2(target: address, salt: bytes32) -> address:
# test2 = c.test2(b"\x01", salt)
# assert HexBytes(test2) == create2_address_of(c.address, salt, vyper_initcode(b"\x01"))
# assert_tx_failed(lambda: c.test2(bytecode, salt))
+
+
+# XXX: these various tests to check the msize allocator for
+# create_copy_of and create_from_blueprint depend on calling convention
+# and variables writing to memory. think of ways to make more robust to
+# changes in calling convention and memory layout
+@pytest.mark.parametrize("blueprint_prefix", [b"", b"\xfe", b"\xfe\71\x00"])
+def test_create_from_blueprint_complex_value(
+ get_contract, deploy_blueprint_for, w3, blueprint_prefix
+):
+ # check msize allocator does not get trampled by value= kwarg
+ code = """
+var: uint256
+
+@external
+@payable
+def __init__(x: uint256):
+ self.var = x
+
+@external
+def foo()-> uint256:
+ return self.var
+ """
+
+ prefix_len = len(blueprint_prefix)
+
+ some_constant = b"\00" * 31 + b"\x0c"
+
+ deployer_code = f"""
+created_address: public(address)
+x: constant(Bytes[32]) = {some_constant}
+
+@internal
+def foo() -> uint256:
+ g:uint256 = 42
+ return 3
+
+@external
+@payable
+def test(target: address):
+ self.created_address = create_from_blueprint(
+ target,
+ x,
+ code_offset={prefix_len},
+ value=self.foo(),
+ raw_args=True
+ )
+ """
+
+ foo_contract = get_contract(code, 12)
+ expected_runtime_code = w3.eth.get_code(foo_contract.address)
+
+ f, FooContract = deploy_blueprint_for(code, initcode_prefix=blueprint_prefix)
+
+ d = get_contract(deployer_code)
+
+ d.test(f.address, transact={"value": 3})
+
+ test = FooContract(d.created_address())
+ assert w3.eth.get_code(test.address) == expected_runtime_code
+ assert test.foo() == 12
+
+
+@pytest.mark.parametrize("blueprint_prefix", [b"", b"\xfe", b"\xfe\71\x00"])
+def test_create_from_blueprint_complex_salt_raw_args(
+ get_contract, deploy_blueprint_for, w3, blueprint_prefix
+):
+ # test msize allocator does not get trampled by salt= kwarg
+ code = """
+var: uint256
+
+@external
+@payable
+def __init__(x: uint256):
+ self.var = x
+
+@external
+def foo()-> uint256:
+ return self.var
+ """
+
+ some_constant = b"\00" * 31 + b"\x0c"
+ prefix_len = len(blueprint_prefix)
+
+ deployer_code = f"""
+created_address: public(address)
+
+x: constant(Bytes[32]) = {some_constant}
+salt: constant(bytes32) = keccak256("kebab")
+
+@internal
+def foo() -> bytes32:
+ g:uint256 = 42
+ return salt
+
+@external
+@payable
+def test(target: address):
+ self.created_address = create_from_blueprint(
+ target,
+ x,
+ code_offset={prefix_len},
+ salt=self.foo(),
+ raw_args= True
+ )
+ """
+
+ foo_contract = get_contract(code, 12)
+ expected_runtime_code = w3.eth.get_code(foo_contract.address)
+
+ f, FooContract = deploy_blueprint_for(code, initcode_prefix=blueprint_prefix)
+
+ d = get_contract(deployer_code)
+
+ d.test(f.address, transact={})
+
+ test = FooContract(d.created_address())
+ assert w3.eth.get_code(test.address) == expected_runtime_code
+ assert test.foo() == 12
+
+
+@pytest.mark.parametrize("blueprint_prefix", [b"", b"\xfe", b"\xfe\71\x00"])
+def test_create_from_blueprint_complex_salt_no_constructor_args(
+ get_contract, deploy_blueprint_for, w3, blueprint_prefix
+):
+ # test msize allocator does not get trampled by salt= kwarg
+ code = """
+var: uint256
+
+@external
+@payable
+def __init__():
+ self.var = 12
+
+@external
+def foo()-> uint256:
+ return self.var
+ """
+
+ prefix_len = len(blueprint_prefix)
+ deployer_code = f"""
+created_address: public(address)
+
+salt: constant(bytes32) = keccak256("kebab")
+
+@external
+@payable
+def test(target: address):
+ self.created_address = create_from_blueprint(
+ target,
+ code_offset={prefix_len},
+ salt=keccak256(_abi_encode(target))
+ )
+ """
+
+ foo_contract = get_contract(code)
+ expected_runtime_code = w3.eth.get_code(foo_contract.address)
+
+ f, FooContract = deploy_blueprint_for(code, initcode_prefix=blueprint_prefix)
+
+ d = get_contract(deployer_code)
+
+ d.test(f.address, transact={})
+
+ test = FooContract(d.created_address())
+ assert w3.eth.get_code(test.address) == expected_runtime_code
+ assert test.foo() == 12
+
+
+def test_create_copy_of_complex_kwargs(get_contract, w3):
+ # test msize allocator does not get trampled by salt= kwarg
+ complex_salt = """
+created_address: public(address)
+
+@external
+def test(target: address) -> address:
+ self.created_address = create_copy_of(
+ target,
+ salt=keccak256(_abi_encode(target))
+ )
+ return self.created_address
+
+ """
+
+ c = get_contract(complex_salt)
+ bytecode = w3.eth.get_code(c.address)
+ c.test(c.address, transact={})
+ test1 = c.created_address()
+ assert w3.eth.get_code(test1) == bytecode
+
+ # test msize allocator does not get trampled by value= kwarg
+ complex_value = """
+created_address: public(address)
+
+@external
+@payable
+def test(target: address) -> address:
+ value: uint256 = 2
+ self.created_address = create_copy_of(target, value = [2,2,2][value])
+ return self.created_address
+
+ """
+
+ c = get_contract(complex_value)
+ bytecode = w3.eth.get_code(c.address)
+
+ c.test(c.address, transact={"value": 2})
+ test1 = c.created_address()
+ assert w3.eth.get_code(test1) == bytecode
|
import pytest
import rlp
from eth.codecs import abi
from hexbytes import HexBytes
import vyper.ir.compile_ir as compile_ir
from vyper.codegen.ir_node import IRnode
from vyper.compiler.settings import OptimizationLevel
from vyper.utils import EIP_170_LIMIT, checksum_encode, keccak256
# initcode used by create_minimal_proxy_to
def eip1167_initcode(_addr):
addr = HexBytes(_addr)
pre = HexBytes("0x602D3D8160093D39F3363d3d373d3d3d363d73")
post = HexBytes("0x5af43d82803e903d91602b57fd5bf3")
return HexBytes(pre + (addr + HexBytes(0) * (20 - len(addr))) + post)
# initcode used by CreateCopyOf
def vyper_initcode(runtime_bytecode):
bytecode_len_hex = hex(len(runtime_bytecode))[2:].rjust(6, "0")
return HexBytes("0x62" + bytecode_len_hex + "3d81600b3d39f3") + runtime_bytecode
def test_create_minimal_proxy_to_create(get_contract):
code = """
main: address
@external
def test() -> address:
self.main = create_minimal_proxy_to(self)
return self.main
"""
c = get_contract(code)
address_bits = int(c.address, 16)
nonce = 1
rlp_encoded = rlp.encode([address_bits, nonce])
expected_create_address = keccak256(rlp_encoded)[12:].rjust(20, b"\x00")
assert c.test() == checksum_encode("0x" + expected_create_address.hex())
def test_create_minimal_proxy_to_call(get_contract, w3):
code = """
interface SubContract:
def hello() -> Bytes[100]: view
other: public(address)
@external
def test() -> address:
self.other = create_minimal_proxy_to(self)
return self.other
@external
def hello() -> Bytes[100]:
return b"hello world!"
@external
def test2() -> Bytes[100]:
return SubContract(self.other).hello()
"""
c = get_contract(code)
assert c.hello() == b"hello world!"
c.test(transact={})
assert c.test2() == b"hello world!"
def test_minimal_proxy_exception(w3, get_contract, assert_tx_failed):
code = """
interface SubContract:
def hello(a: uint256) -> Bytes[100]: view
other: public(address)
@external
def test() -> address:
self.other = create_minimal_proxy_to(self)
return self.other
@external
def hello(a: uint256) -> Bytes[100]:
assert a > 0, "invaliddddd"
return b"hello world!"
@external
def test2(a: uint256) -> Bytes[100]:
return SubContract(self.other).hello(a)
"""
c = get_contract(code)
assert c.hello(1) == b"hello world!"
c.test(transact={})
assert c.test2(1) == b"hello world!"
assert_tx_failed(lambda: c.test2(0))
GAS_SENT = 30000
tx_hash = c.test2(0, transact={"gas": GAS_SENT})
receipt = w3.eth.get_transaction_receipt(tx_hash)
assert receipt["status"] == 0
assert receipt["gasUsed"] < GAS_SENT
def test_create_minimal_proxy_to_create2(
get_contract, create2_address_of, keccak, assert_tx_failed
):
code = """
main: address
@external
def test(_salt: bytes32) -> address:
self.main = create_minimal_proxy_to(self, salt=_salt)
return self.main
"""
c = get_contract(code)
salt = keccak(b"vyper")
assert HexBytes(c.test(salt)) == create2_address_of(
c.address, salt, eip1167_initcode(c.address)
)
c.test(salt, transact={})
# revert on collision
assert_tx_failed(lambda: c.test(salt, transact={}))
# test blueprints with various prefixes - 0xfe would block calls to the blueprint
# contract, and 0xfe7100 is ERC5202 magic
@pytest.mark.parametrize("blueprint_prefix", [b"", b"\xfe", b"\xfe\71\x00"])
def test_create_from_blueprint(
get_contract,
deploy_blueprint_for,
w3,
keccak,
create2_address_of,
assert_tx_failed,
blueprint_prefix,
):
code = """
@external
def foo() -> uint256:
return 123
"""
prefix_len = len(blueprint_prefix)
deployer_code = f"""
created_address: public(address)
@external
def test(target: address):
self.created_address = create_from_blueprint(target, code_offset={prefix_len})
@external
def test2(target: address, salt: bytes32):
self.created_address = create_from_blueprint(target, code_offset={prefix_len}, salt=salt)
"""
# deploy a foo so we can compare its bytecode with factory deployed version
foo_contract = get_contract(code)
expected_runtime_code = w3.eth.get_code(foo_contract.address)
f, FooContract = deploy_blueprint_for(code, initcode_prefix=blueprint_prefix)
d = get_contract(deployer_code)
d.test(f.address, transact={})
test = FooContract(d.created_address())
assert w3.eth.get_code(test.address) == expected_runtime_code
assert test.foo() == 123
# extcodesize check
zero_address = "0x" + "00" * 20
assert_tx_failed(lambda: d.test(zero_address))
# now same thing but with create2
salt = keccak(b"vyper")
d.test2(f.address, salt, transact={})
test = FooContract(d.created_address())
assert w3.eth.get_code(test.address) == expected_runtime_code
assert test.foo() == 123
# check if the create2 address matches our offchain calculation
initcode = w3.eth.get_code(f.address)
initcode = initcode[len(blueprint_prefix) :] # strip the prefix
assert HexBytes(test.address) == create2_address_of(d.address, salt, initcode)
# can't collide addresses
assert_tx_failed(lambda: d.test2(f.address, salt))
def test_create_from_blueprint_bad_code_offset(
get_contract, get_contract_from_ir, deploy_blueprint_for, w3, assert_tx_failed
):
deployer_code = """
BLUEPRINT: immutable(address)
@external
def __init__(blueprint_address: address):
BLUEPRINT = blueprint_address
@external
def test(code_ofst: uint256) -> address:
return create_from_blueprint(BLUEPRINT, code_offset=code_ofst)
"""
initcode_len = 100
# deploy a blueprint contract whose contained initcode contains only
# zeroes (so no matter which offset, create_from_blueprint will
# return empty code)
ir = IRnode.from_list(["deploy", 0, ["seq"] + ["stop"] * initcode_len, 0])
bytecode, _ = compile_ir.assembly_to_evm(
compile_ir.compile_to_assembly(ir, optimize=OptimizationLevel.NONE)
)
# manually deploy the bytecode
c = w3.eth.contract(abi=[], bytecode=bytecode)
deploy_transaction = c.constructor()
tx_info = {"from": w3.eth.accounts[0], "value": 0, "gasPrice": 0}
tx_hash = deploy_transaction.transact(tx_info)
blueprint_address = w3.eth.get_transaction_receipt(tx_hash)["contractAddress"]
blueprint_code = w3.eth.get_code(blueprint_address)
print("BLUEPRINT CODE:", blueprint_code)
d = get_contract(deployer_code, blueprint_address)
# deploy with code_ofst=0 fine
d.test(0)
# deploy with code_ofst=len(blueprint) - 1 fine
d.test(initcode_len - 1)
# code_offset=len(blueprint) NOT fine! would EXTCODECOPY empty initcode
assert_tx_failed(lambda: d.test(initcode_len))
# code_offset=EIP_170_LIMIT definitely not fine!
assert_tx_failed(lambda: d.test(EIP_170_LIMIT))
# test create_from_blueprint with args
def test_create_from_blueprint_args(
get_contract, deploy_blueprint_for, w3, keccak, create2_address_of, assert_tx_failed
):
code = """
struct Bar:
x: String[32]
FOO: immutable(String[128])
BAR: immutable(Bar)
@external
def __init__(foo: String[128], bar: Bar):
FOO = foo
BAR = bar
@external
def foo() -> String[128]:
return FOO
@external
def bar() -> Bar:
return BAR
"""
deployer_code = """
struct Bar:
x: String[32]
created_address: public(address)
@external
def test(target: address, arg1: String[128], arg2: Bar):
self.created_address = create_from_blueprint(target, arg1, arg2)
@external
def test2(target: address, arg1: String[128], arg2: Bar, salt: bytes32):
self.created_address = create_from_blueprint(target, arg1, arg2, salt=salt)
@external
def test3(target: address, argdata: Bytes[1024]):
self.created_address = create_from_blueprint(target, argdata, raw_args=True)
@external
def test4(target: address, argdata: Bytes[1024], salt: bytes32):
self.created_address = create_from_blueprint(target, argdata, salt=salt, raw_args=True)
@external
def should_fail(target: address, arg1: String[129], arg2: Bar):
self.created_address = create_from_blueprint(target, arg1, arg2)
"""
FOO = "hello!"
BAR = ("world!",)
# deploy a foo so we can compare its bytecode with factory deployed version
foo_contract = get_contract(code, FOO, BAR)
expected_runtime_code = w3.eth.get_code(foo_contract.address)
f, FooContract = deploy_blueprint_for(code)
d = get_contract(deployer_code)
initcode = w3.eth.get_code(f.address)
d.test(f.address, FOO, BAR, transact={})
test = FooContract(d.created_address())
assert w3.eth.get_code(test.address) == expected_runtime_code
assert test.foo() == FOO
assert test.bar() == BAR
# extcodesize check
assert_tx_failed(lambda: d.test("0x" + "00" * 20, FOO, BAR))
# now same thing but with create2
salt = keccak(b"vyper")
d.test2(f.address, FOO, BAR, salt, transact={})
test = FooContract(d.created_address())
assert w3.eth.get_code(test.address) == expected_runtime_code
assert test.foo() == FOO
assert test.bar() == BAR
encoded_args = abi.encode("(string,(string))", (FOO, BAR))
assert HexBytes(test.address) == create2_address_of(d.address, salt, initcode + encoded_args)
d.test3(f.address, encoded_args, transact={})
test = FooContract(d.created_address())
assert w3.eth.get_code(test.address) == expected_runtime_code
assert test.foo() == FOO
assert test.bar() == BAR
d.test4(f.address, encoded_args, keccak(b"test4"), transact={})
test = FooContract(d.created_address())
assert w3.eth.get_code(test.address) == expected_runtime_code
assert test.foo() == FOO
assert test.bar() == BAR
# can't collide addresses
assert_tx_failed(lambda: d.test2(f.address, FOO, BAR, salt))
# ditto - with raw_args
assert_tx_failed(lambda: d.test4(f.address, encoded_args, salt))
# but creating a contract with different args is ok
FOO = "bar"
d.test2(f.address, FOO, BAR, salt, transact={})
# just for kicks
assert FooContract(d.created_address()).foo() == FOO
assert FooContract(d.created_address()).bar() == BAR
# Foo constructor should fail
FOO = "01" * 129
BAR = ("",)
sig = keccak("should_fail(address,string,(string))".encode()).hex()[:10]
encoded = abi.encode("(address,string,(string))", (f.address, FOO, BAR)).hex()
assert_tx_failed(lambda: w3.eth.send_transaction({"to": d.address, "data": f"{sig}{encoded}"}))
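# A minimal sketch (hypothetical, mirroring what the create2_address_of
# fixture is assumed to compute) of the CREATE2 address derivation relied on
# by the salted deployments above:
#   address = keccak256(0xff ++ deployer ++ salt ++ keccak256(initcode))[12:]
from eth_utils import keccak, to_checksum_address

def _create2_address(deployer: bytes, salt: bytes, initcode: bytes) -> str:
    digest = keccak(b"\xff" + deployer + salt + keccak(initcode))
    return to_checksum_address(digest[12:])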
def test_create_copy_of(get_contract, w3, keccak, create2_address_of, assert_tx_failed):
code = """
created_address: public(address)
@internal
def _create_copy_of(target: address):
self.created_address = create_copy_of(target)
@internal
def _create_copy_of2(target: address, salt: bytes32):
self.created_address = create_copy_of(target, salt=salt)
@external
def test(target: address) -> address:
x: uint256 = 0
self._create_copy_of(target)
assert x == 0 # check memory not clobbered
return self.created_address
@external
def test2(target: address, salt: bytes32) -> address:
x: uint256 = 0
self._create_copy_of2(target, salt)
assert x == 0 # check memory not clobbered
return self.created_address
"""
c = get_contract(code)
bytecode = w3.eth.get_code(c.address)
c.test(c.address, transact={})
test1 = c.created_address()
assert w3.eth.get_code(test1) == bytecode
# extcodesize check
assert_tx_failed(lambda: c.test("0x" + "00" * 20))
# test1 = c.test(b"\x01")
# assert w3.eth.get_code(test1) == b"\x01"
salt = keccak(b"vyper")
c.test2(c.address, salt, transact={})
test2 = c.created_address()
assert w3.eth.get_code(test2) == bytecode
assert HexBytes(test2) == create2_address_of(c.address, salt, vyper_initcode(bytecode))
# can't create2 where contract already exists
assert_tx_failed(lambda: c.test2(c.address, salt, transact={}))
# test single byte contract
# test2 = c.test2(b"\x01", salt)
# assert HexBytes(test2) == create2_address_of(c.address, salt, vyper_initcode(b"\x01"))
# assert_tx_failed(lambda: c.test2(bytecode, salt))
|
GHSA-c647-pxm2-c52w
|
tests/parser/functions/test_raw_call.py
|
@@ -426,6 +426,164 @@ def baz(_addr: address, should_raise: bool) -> uint256:
assert caller.baz(target.address, False) == 3
+# XXX: these test_raw_call_clean_mem* tests depend on local variables and
+# the calling convention writing to memory. Think of ways to make them more
+# robust to changes in the calling convention and memory layout.
+
+
+def test_raw_call_msg_data_clean_mem(get_contract):
+ # test msize uses clean memory and does not get overwritten by
+ # any raw_call() arguments
+ code = """
+identity: constant(address) = 0x0000000000000000000000000000000000000004
+
+@external
+def foo():
+ pass
+
+@internal
+@view
+def get_address()->address:
+ a:uint256 = 121 # 0x79
+ return identity
+@external
+def bar(f: uint256, u: uint256) -> Bytes[100]:
+ # embed an internal call in the calculation of address
+ a: Bytes[100] = raw_call(self.get_address(), msg.data, max_outsize=100)
+ return a
+ """
+
+ c = get_contract(code)
+ assert (
+ c.bar(1, 2).hex() == "ae42e951"
+ "0000000000000000000000000000000000000000000000000000000000000001"
+ "0000000000000000000000000000000000000000000000000000000000000002"
+ )
+
+
+def test_raw_call_clean_mem2(get_contract):
+ # test msize uses clean memory and does not get overwritten by
+ # any raw_call() arguments, another way
+ code = """
+buf: Bytes[100]
+
+@external
+def bar(f: uint256, g: uint256, h: uint256) -> Bytes[100]:
+ # embed a memory modifying expression in the calculation of address
+ self.buf = raw_call(
+ [0x0000000000000000000000000000000000000004,][f-1],
+ msg.data,
+ max_outsize=100
+ )
+ return self.buf
+ """
+ c = get_contract(code)
+
+ assert (
+ c.bar(1, 2, 3).hex() == "9309b76e"
+ "0000000000000000000000000000000000000000000000000000000000000001"
+ "0000000000000000000000000000000000000000000000000000000000000002"
+ "0000000000000000000000000000000000000000000000000000000000000003"
+ )
+
+
+def test_raw_call_clean_mem3(get_contract):
+ # test msize uses clean memory and does not get overwritten by
+ # any raw_call() arguments, and also test order of evaluation for
+ # scope_multi
+ code = """
+buf: Bytes[100]
+canary: String[32]
+
+@internal
+def bar() -> address:
+ self.canary = "bar"
+ return 0x0000000000000000000000000000000000000004
+
+@internal
+def goo() -> uint256:
+ self.canary = "goo"
+ return 0
+
+@external
+def foo() -> String[32]:
+ self.buf = raw_call(self.bar(), msg.data, value = self.goo(), max_outsize=100)
+ return self.canary
+ """
+ c = get_contract(code)
+ assert c.foo() == "goo"
+
+
+def test_raw_call_clean_mem_kwargs_value(get_contract):
+ # test msize uses clean memory and does not get overwritten by
+ # any raw_call() kwargs
+ code = """
+buf: Bytes[100]
+
+# add a dummy function to trigger memory expansion in the selector table routine
+@external
+def foo():
+ pass
+
+@internal
+def _value() -> uint256:
+ x: uint256 = 1
+ return x
+
+@external
+def bar(f: uint256) -> Bytes[100]:
+ # embed a memory modifying expression in the calculation of address
+ self.buf = raw_call(
+ 0x0000000000000000000000000000000000000004,
+ msg.data,
+ max_outsize=100,
+ value=self._value()
+ )
+ return self.buf
+ """
+ c = get_contract(code, value=1)
+
+ assert (
+ c.bar(13).hex() == "0423a132"
+ "000000000000000000000000000000000000000000000000000000000000000d"
+ )
+
+
+def test_raw_call_clean_mem_kwargs_gas(get_contract):
+ # test msize uses clean memory and does not get overwritten by
+ # any raw_call() kwargs
+ code = """
+buf: Bytes[100]
+
+# add a dummy function to trigger memory expansion in the selector table routine
+@external
+def foo():
+ pass
+
+@internal
+def _gas() -> uint256:
+ x: uint256 = msg.gas
+ return x
+
+@external
+def bar(f: uint256) -> Bytes[100]:
+ # embed a memory modifying expression in the calculation of address
+ self.buf = raw_call(
+ 0x0000000000000000000000000000000000000004,
+ msg.data,
+ max_outsize=100,
+ gas=self._gas()
+ )
+ return self.buf
+ """
+ c = get_contract(code, value=1)
+
+ assert (
+ c.bar(15).hex() == "0423a132"
+ "000000000000000000000000000000000000000000000000000000000000000f"
+ )
+
+
uncompilable_code = [
(
"""
|
import pytest
from hexbytes import HexBytes
from vyper import compile_code
from vyper.builtins.functions import eip1167_bytecode
from vyper.exceptions import ArgumentException, InvalidType, StateAccessViolation
pytestmark = pytest.mark.usefixtures("memory_mocker")
def test_max_outsize_exceeds_returndatasize(get_contract):
source_code = """
@external
def foo() -> Bytes[7]:
return raw_call(0x0000000000000000000000000000000000000004, b"moose", max_outsize=7)
"""
c = get_contract(source_code)
assert c.foo() == b"moose"
def test_raw_call_non_memory(get_contract):
source_code = """
_foo: Bytes[5]
@external
def foo() -> Bytes[5]:
self._foo = b"moose"
return raw_call(0x0000000000000000000000000000000000000004, self._foo, max_outsize=5)
"""
c = get_contract(source_code)
assert c.foo() == b"moose"
def test_returndatasize_exceeds_max_outsize(get_contract):
source_code = """
@external
def foo() -> Bytes[3]:
return raw_call(0x0000000000000000000000000000000000000004, b"moose", max_outsize=3)
"""
c = get_contract(source_code)
assert c.foo() == b"moo"
def test_returndatasize_matches_max_outsize(get_contract):
source_code = """
@external
def foo() -> Bytes[5]:
return raw_call(0x0000000000000000000000000000000000000004, b"moose", max_outsize=5)
"""
c = get_contract(source_code)
assert c.foo() == b"moose"
def test_multiple_levels(w3, get_contract_with_gas_estimation):
inner_code = """
@external
def returnten() -> int128:
return 10
"""
c = get_contract_with_gas_estimation(inner_code)
outer_code = """
@external
def create_and_call_returnten(inp: address) -> int128:
x: address = create_minimal_proxy_to(inp)
o: int128 = extract32(raw_call(x, b"\\xd0\\x1f\\xb1\\xb8", max_outsize=32, gas=50000), 0, output_type=int128) # noqa: E501
return o
@external
def create_and_return_proxy(inp: address) -> address:
x: address = create_minimal_proxy_to(inp)
return x
"""
c2 = get_contract_with_gas_estimation(outer_code)
assert c2.create_and_call_returnten(c.address) == 10
c2.create_and_call_returnten(c.address, transact={})
_, preamble, callcode = eip1167_bytecode()
c3 = c2.create_and_return_proxy(c.address, call={})
c2.create_and_return_proxy(c.address, transact={})
c3_contract_code = w3.to_bytes(w3.eth.get_code(c3))
assert c3_contract_code[:10] == HexBytes(preamble)
assert c3_contract_code[-15:] == HexBytes(callcode)
print("Passed proxy test")
# TODO: This one is special
# print(f'Gas consumed: {(chain.head_state.receipts[-1].gas_used - chain.head_state.receipts[-2].gas_used - chain.last_tx.intrinsic_gas_used)}') # noqa: E501
def test_multiple_levels2(assert_tx_failed, get_contract_with_gas_estimation):
inner_code = """
@external
def returnten() -> int128:
raise
"""
c = get_contract_with_gas_estimation(inner_code)
outer_code = """
@external
def create_and_call_returnten(inp: address) -> int128:
x: address = create_minimal_proxy_to(inp)
o: int128 = extract32(raw_call(x, b"\\xd0\\x1f\\xb1\\xb8", max_outsize=32, gas=50000), 0, output_type=int128) # noqa: E501
return o
@external
def create_and_return_proxy(inp: address) -> address:
return create_minimal_proxy_to(inp)
"""
c2 = get_contract_with_gas_estimation(outer_code)
assert_tx_failed(lambda: c2.create_and_call_returnten(c.address))
print("Passed minimal proxy exception test")
def test_delegate_call(w3, get_contract):
inner_code = """
a: address # this is required for storage alignment...
owners: public(address[5])
@external
def set_owner(i: int128, o: address):
self.owners[i] = o
"""
inner_contract = get_contract(inner_code)
outer_code = """
owner_setter_contract: public(address)
owners: public(address[5])
@external
def __init__(_owner_setter: address):
self.owner_setter_contract = _owner_setter
@external
def set(i: int128, owner: address):
    # delegate setting owners to the other contract
cdata: Bytes[68] = concat(method_id("set_owner(int128,address)"), convert(i, bytes32), convert(owner, bytes32)) # noqa: E501
raw_call(
self.owner_setter_contract,
cdata,
gas=msg.gas,
max_outsize=0,
is_delegate_call=True
)
"""
a0, a1, a2 = w3.eth.accounts[:3]
outer_contract = get_contract(outer_code, *[inner_contract.address])
# Test that setting the inner contract's state works.
inner_contract.set_owner(1, a2, transact={})
assert inner_contract.owners(1) == a2
# Confirm outer contract's state is empty and contract to call has been set.
assert outer_contract.owner_setter_contract() == inner_contract.address
assert outer_contract.owners(1) is None
# Call the outer contract, which makes a delegate call to inner_contract.
tx_hash = outer_contract.set(1, a1, transact={})
assert w3.eth.get_transaction_receipt(tx_hash)["status"] == 1
assert outer_contract.owners(1) == a1
def test_gas(get_contract, assert_tx_failed):
inner_code = """
bar: bytes32
@external
def foo(_bar: bytes32):
self.bar = _bar
"""
inner_contract = get_contract(inner_code)
outer_code = """
@external
def foo_call(_addr: address):
cdata: Bytes[40] = concat(
method_id("foo(bytes32)"),
0x0000000000000000000000000000000000000000000000000000000000000001
)
raw_call(_addr, cdata, max_outsize=0{})
"""
# with no gas value given, enough will be forwarded to complete the call
outer_contract = get_contract(outer_code.format(""))
outer_contract.foo_call(inner_contract.address)
# manually specifying a sufficient amount should succeed
outer_contract = get_contract(outer_code.format(", gas=50000"))
outer_contract.foo_call(inner_contract.address)
# manually specifying an insufficient amount should fail
outer_contract = get_contract(outer_code.format(", gas=15000"))
assert_tx_failed(lambda: outer_contract.foo_call(inner_contract.address))
def test_static_call(get_contract):
target_source = """
@external
@view
def foo() -> int128:
return 42
"""
caller_source = """
@external
@view
def foo(_addr: address) -> int128:
_response: Bytes[32] = raw_call(
_addr,
method_id("foo()"),
max_outsize=32,
is_static_call=True,
)
return convert(_response, int128)
"""
target = get_contract(target_source)
caller = get_contract(caller_source)
assert caller.foo(target.address) == 42
def test_forward_calldata(get_contract, w3, keccak):
target_source = """
@external
def foo() -> uint256:
return 123
"""
caller_source = """
target: address
@external
def set_target(target: address):
self.target = target
@external
def __default__():
assert 123 == _abi_decode(raw_call(self.target, msg.data, max_outsize=32), uint256)
"""
target = get_contract(target_source)
caller = get_contract(caller_source)
caller.set_target(target.address, transact={})
# manually construct msg.data for `caller` contract
sig = keccak("foo()".encode()).hex()[:10]
w3.eth.send_transaction({"to": caller.address, "data": sig})
# check that max_outsize=0 does the same thing as not setting max_outsize:
# compile to bytecode and compare the bytecode directly.
def test_max_outsize_0():
code1 = """
@external
def test_raw_call(_target: address):
raw_call(_target, method_id("foo()"))
"""
code2 = """
@external
def test_raw_call(_target: address):
raw_call(_target, method_id("foo()"), max_outsize=0)
"""
output1 = compile_code(code1, ["bytecode", "bytecode_runtime"])
output2 = compile_code(code2, ["bytecode", "bytecode_runtime"])
assert output1 == output2
# check that max_outsize=0 does the same thing as not setting max_outsize,
# this time with revert_on_failure set to False
def test_max_outsize_0_no_revert_on_failure():
code1 = """
@external
def test_raw_call(_target: address) -> bool:
# compile raw_call both ways, with revert_on_failure
a: bool = raw_call(_target, method_id("foo()"), revert_on_failure=False)
return a
"""
# same code, but with max_outsize=0
code2 = """
@external
def test_raw_call(_target: address) -> bool:
a: bool = raw_call(_target, method_id("foo()"), max_outsize=0, revert_on_failure=False)
return a
"""
output1 = compile_code(code1, ["bytecode", "bytecode_runtime"])
output2 = compile_code(code2, ["bytecode", "bytecode_runtime"])
assert output1 == output2
# test functionality of max_outsize=0
def test_max_outsize_0_call(get_contract):
target_source = """
@external
@payable
def bar() -> uint256:
return 123
"""
caller_source = """
@external
@payable
def foo(_addr: address) -> bool:
success: bool = raw_call(_addr, method_id("bar()"), max_outsize=0, revert_on_failure=False)
return success
"""
target = get_contract(target_source)
caller = get_contract(caller_source)
assert caller.foo(target.address) is True
def test_static_call_fails_nonpayable(get_contract, assert_tx_failed):
target_source = """
baz: int128
@external
def foo() -> int128:
self.baz = 31337
return self.baz
"""
caller_source = """
@external
@view
def foo(_addr: address) -> int128:
_response: Bytes[32] = raw_call(
_addr,
method_id("foo()"),
max_outsize=32,
is_static_call=True,
)
return convert(_response, int128)
"""
target = get_contract(target_source)
caller = get_contract(caller_source)
assert_tx_failed(lambda: caller.foo(target.address))
def test_checkable_raw_call(get_contract, assert_tx_failed):
target_source = """
baz: int128
@external
def fail1(should_raise: bool):
if should_raise:
raise "fail"
# test both paths for raw_call -
# they differ depending on whether the callee has a return type
# (fail2 fails because of staticcall)
@external
def fail2(should_raise: bool) -> int128:
if should_raise:
self.baz = self.baz + 1
return self.baz
"""
caller_source = """
@external
@view
def foo(_addr: address, should_raise: bool) -> uint256:
success: bool = True
response: Bytes[32] = b""
success, response = raw_call(
_addr,
_abi_encode(should_raise, method_id=method_id("fail1(bool)")),
max_outsize=32,
is_static_call=True,
revert_on_failure=False,
)
assert success == (not should_raise)
return 1
@external
@view
def bar(_addr: address, should_raise: bool) -> uint256:
success: bool = True
response: Bytes[32] = b""
success, response = raw_call(
_addr,
_abi_encode(should_raise, method_id=method_id("fail2(bool)")),
max_outsize=32,
is_static_call=True,
revert_on_failure=False,
)
assert success == (not should_raise)
return 2
# test max_outsize not set case
@external
@nonpayable
def baz(_addr: address, should_raise: bool) -> uint256:
success: bool = True
success = raw_call(
_addr,
_abi_encode(should_raise, method_id=method_id("fail1(bool)")),
revert_on_failure=False,
)
assert success == (not should_raise)
return 3
"""
target = get_contract(target_source)
caller = get_contract(caller_source)
assert caller.foo(target.address, True) == 1
assert caller.foo(target.address, False) == 1
assert caller.bar(target.address, True) == 2
assert caller.bar(target.address, False) == 2
assert caller.baz(target.address, True) == 3
assert caller.baz(target.address, False) == 3
uncompilable_code = [
(
"""
@external
@view
def foo(_addr: address):
raw_call(_addr, method_id("foo()"))
""",
StateAccessViolation,
),
(
"""
@external
def foo(_addr: address):
raw_call(_addr, method_id("foo()"), is_delegate_call=True, is_static_call=True)
""",
ArgumentException,
),
(
"""
@external
@view
def foo(_addr: address):
raw_call(_addr, 256)
""",
InvalidType,
),
]
@pytest.mark.parametrize("source_code,exc", uncompilable_code)
def test_invalid_type_exception(
assert_compile_failed, get_contract_with_gas_estimation, source_code, exc
):
assert_compile_failed(lambda: get_contract_with_gas_estimation(source_code), exc)
|
GHSA-c647-pxm2-c52w
|
vyper/builtins/functions.py
|
@@ -21,6 +21,7 @@
clamp_basetype,
clamp_nonzero,
copy_bytes,
+ dummy_node_for_type,
ensure_in_memory,
eval_once_check,
eval_seq,
@@ -36,7 +37,7 @@
unwrap_location,
)
from vyper.codegen.expr import Expr
-from vyper.codegen.ir_node import Encoding
+from vyper.codegen.ir_node import Encoding, scope_multi
from vyper.codegen.keccak256_helper import keccak256_helper
from vyper.evm.address_space import MEMORY, STORAGE
from vyper.exceptions import (
@@ -1155,14 +1156,17 @@ def build_IR(self, expr, args, kwargs, context):
outsize,
]
- if delegate_call:
- call_op = ["delegatecall", gas, to, *common_call_args]
- elif static_call:
- call_op = ["staticcall", gas, to, *common_call_args]
- else:
- call_op = ["call", gas, to, value, *common_call_args]
+ gas, value = IRnode.from_list(gas), IRnode.from_list(value)
+ with scope_multi((to, value, gas), ("_to", "_value", "_gas")) as (b1, (to, value, gas)):
+ if delegate_call:
+ call_op = ["delegatecall", gas, to, *common_call_args]
+ elif static_call:
+ call_op = ["staticcall", gas, to, *common_call_args]
+ else:
+ call_op = ["call", gas, to, value, *common_call_args]
- call_ir += [call_op]
+ call_ir += [call_op]
+ call_ir = b1.resolve(call_ir)
# build sequence IR
if outsize:
@@ -1589,13 +1593,15 @@ def build_IR(self, expr, context):
# CREATE* functions
+CREATE2_SENTINEL = dummy_node_for_type(BYTES32_T)
+
# create helper functions
# generates CREATE op sequence + zero check for result
-def _create_ir(value, buf, length, salt=None, checked=True):
+def _create_ir(value, buf, length, salt, checked=True):
args = [value, buf, length]
create_op = "create"
- if salt is not None:
+ if salt is not CREATE2_SENTINEL:
create_op = "create2"
args.append(salt)
@@ -1713,8 +1719,9 @@ def build_IR(self, expr, args, kwargs, context):
context.check_is_not_constant("use {self._id}", expr)
should_use_create2 = "salt" in [kwarg.arg for kwarg in expr.keywords]
+
if not should_use_create2:
- kwargs["salt"] = None
+ kwargs["salt"] = CREATE2_SENTINEL
ir_builder = self._build_create_IR(expr, args, context, **kwargs)
@@ -1794,13 +1801,16 @@ def _add_gas_estimate(self, args, should_use_create2):
def _build_create_IR(self, expr, args, context, value, salt):
target = args[0]
- with target.cache_when_complex("create_target") as (b1, target):
+ # something we can pass to scope_multi
+ with scope_multi(
+ (target, value, salt), ("create_target", "create_value", "create_salt")
+ ) as (b1, (target, value, salt)):
codesize = IRnode.from_list(["extcodesize", target])
msize = IRnode.from_list(["msize"])
- with codesize.cache_when_complex("target_codesize") as (
+ with scope_multi((codesize, msize), ("target_codesize", "mem_ofst")) as (
b2,
- codesize,
- ), msize.cache_when_complex("mem_ofst") as (b3, mem_ofst):
+ (codesize, mem_ofst),
+ ):
ir = ["seq"]
# make sure there is actually code at the target
@@ -1824,7 +1834,7 @@ def _build_create_IR(self, expr, args, context, value, salt):
ir.append(_create_ir(value, buf, buf_len, salt))
- return b1.resolve(b2.resolve(b3.resolve(ir)))
+ return b1.resolve(b2.resolve(ir))
class CreateFromBlueprint(_CreateBase):
@@ -1877,17 +1887,18 @@ def _build_create_IR(self, expr, args, context, value, salt, code_offset, raw_ar
# (since the abi encoder could write to fresh memory).
# it would be good to not require the memory copy, but need
# to evaluate memory safety.
- with target.cache_when_complex("create_target") as (b1, target), argslen.cache_when_complex(
- "encoded_args_len"
- ) as (b2, encoded_args_len), code_offset.cache_when_complex("code_ofst") as (b3, codeofst):
- codesize = IRnode.from_list(["sub", ["extcodesize", target], codeofst])
+ with scope_multi(
+ (target, value, salt, argslen, code_offset),
+ ("create_target", "create_value", "create_salt", "encoded_args_len", "code_offset"),
+ ) as (b1, (target, value, salt, encoded_args_len, code_offset)):
+ codesize = IRnode.from_list(["sub", ["extcodesize", target], code_offset])
# copy code to memory starting from msize. we are clobbering
# unused memory so it's safe.
msize = IRnode.from_list(["msize"], location=MEMORY)
- with codesize.cache_when_complex("target_codesize") as (
- b4,
- codesize,
- ), msize.cache_when_complex("mem_ofst") as (b5, mem_ofst):
+ with scope_multi((codesize, msize), ("target_codesize", "mem_ofst")) as (
+ b2,
+ (codesize, mem_ofst),
+ ):
ir = ["seq"]
# make sure there is code at the target, and that
@@ -1907,7 +1918,7 @@ def _build_create_IR(self, expr, args, context, value, salt, code_offset, raw_ar
# copy the target code into memory.
# layout starting from mem_ofst:
# 00...00 (22 0's) | preamble | bytecode
- ir.append(["extcodecopy", target, mem_ofst, codeofst, codesize])
+ ir.append(["extcodecopy", target, mem_ofst, code_offset, codesize])
ir.append(copy_bytes(add_ofst(mem_ofst, codesize), argbuf, encoded_args_len, bufsz))
@@ -1922,7 +1933,7 @@ def _build_create_IR(self, expr, args, context, value, salt, code_offset, raw_ar
ir.append(_create_ir(value, mem_ofst, length, salt))
- return b1.resolve(b2.resolve(b3.resolve(b4.resolve(b5.resolve(ir)))))
+ return b1.resolve(b2.resolve(ir))
class _UnsafeMath(BuiltinFunction):
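# A minimal sketch (simplified model, not the real scope_multi from
# vyper.codegen.ir_node) of the discipline the patch introduces: every
# potentially side-effecting argument (target, value, salt, gas, ...) is
# evaluated exactly once, in source order, and bound to a fresh name before
# the call/create opcode references it.
def _scope_multi_model(exprs, names):
    bindings = list(zip(names, exprs))         # evaluation order fixed here, left to right
    def resolve(body):
        for name, expr in reversed(bindings):  # wrap body in nested `with` bindings
            body = ["with", name, expr, body]
        return body
    return resolve, [name for name, _ in bindings]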
|
import hashlib
import math
import operator
from decimal import Decimal
from vyper import ast as vy_ast
from vyper.abi_types import ABI_Tuple
from vyper.ast.validation import validate_call_args
from vyper.codegen.abi_encoder import abi_encode
from vyper.codegen.context import Context, VariableRecord
from vyper.codegen.core import (
STORE,
IRnode,
_freshname,
add_ofst,
bytes_data_ptr,
calculate_type_for_external_return,
check_external_call,
clamp,
clamp2,
clamp_basetype,
clamp_nonzero,
copy_bytes,
ensure_in_memory,
eval_once_check,
eval_seq,
get_bytearray_length,
get_type_for_exact_size,
ir_tuple_from_args,
make_setter,
needs_external_call_wrap,
promote_signed_int,
sar,
shl,
shr,
unwrap_location,
)
from vyper.codegen.expr import Expr
from vyper.codegen.ir_node import Encoding
from vyper.codegen.keccak256_helper import keccak256_helper
from vyper.evm.address_space import MEMORY, STORAGE
from vyper.exceptions import (
ArgumentException,
CompilerPanic,
InvalidLiteral,
InvalidType,
OverflowException,
StateAccessViolation,
StructureException,
TypeMismatch,
UnfoldableNode,
ZeroDivisionException,
)
from vyper.semantics.analysis.base import VarInfo
from vyper.semantics.analysis.utils import (
get_common_types,
get_exact_type_from_node,
get_possible_types_from_node,
validate_expected_type,
)
from vyper.semantics.types import (
TYPE_T,
AddressT,
BoolT,
BytesM_T,
BytesT,
DArrayT,
DecimalT,
HashMapT,
IntegerT,
KwargSettings,
SArrayT,
StringT,
TupleT,
)
from vyper.semantics.types.bytestrings import _BytestringT
from vyper.semantics.types.shortcuts import (
BYTES4_T,
BYTES32_T,
INT128_T,
INT256_T,
UINT8_T,
UINT256_T,
)
from vyper.semantics.types.utils import type_from_annotation
from vyper.utils import (
DECIMAL_DIVISOR,
EIP_170_LIMIT,
SHA3_PER_WORD,
MemoryPositions,
SizeLimits,
bytes_to_int,
ceil32,
fourbytes_to_int,
keccak256,
method_id_int,
vyper_warn,
)
from ._convert import convert
from ._signatures import BuiltinFunction, process_inputs
SHA256_ADDRESS = 2
SHA256_BASE_GAS = 60
SHA256_PER_WORD_GAS = 12
class FoldedFunction(BuiltinFunction):
# Base class for nodes which should always be folded
# Since foldable builtin functions are not folded before semantics validation,
# this flag is used for `check_kwargable` in semantics validation.
_kwargable = True
class TypenameFoldedFunction(FoldedFunction):
# Base class for builtin functions that:
# (1) take a typename as the only argument; and
# (2) should always be folded.
# "TYPE_DEFINITION" is a placeholder value for a type definition string, and
# will be replaced by a `TypeTypeDefinition` object in `infer_arg_types`.
_inputs = [("typename", "TYPE_DEFINITION")]
def fetch_call_return(self, node):
type_ = self.infer_arg_types(node)[0].typedef
return type_
def infer_arg_types(self, node):
validate_call_args(node, 1)
input_typedef = TYPE_T(type_from_annotation(node.args[0]))
return [input_typedef]
class Floor(BuiltinFunction):
_id = "floor"
_inputs = [("value", DecimalT())]
# TODO: maybe use int136?
_return_type = INT256_T
def evaluate(self, node):
validate_call_args(node, 1)
if not isinstance(node.args[0], vy_ast.Decimal):
raise UnfoldableNode
value = math.floor(node.args[0].value)
return vy_ast.Int.from_node(node, value=value)
@process_inputs
def build_IR(self, expr, args, kwargs, context):
arg = args[0]
with arg.cache_when_complex("arg") as (b1, arg):
ret = IRnode.from_list(
[
"if",
["slt", arg, 0],
["sdiv", ["sub", arg, DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],
["sdiv", arg, DECIMAL_DIVISOR],
],
typ=INT256_T,
)
return b1.resolve(ret)
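# A minimal sketch (illustrative only, not compiler code) of the fixed-point
# floor computed above: EVM SDIV truncates toward zero, so negative inputs
# are biased by DECIMAL_DIVISOR - 1 before dividing.
def _sdiv(a: int, b: int) -> int:
    q = abs(a) // abs(b)
    return -q if (a < 0) != (b < 0) else q

def _floor_fixed(x: int, divisor: int = 10**10) -> int:
    if x < 0:
        return _sdiv(x - (divisor - 1), divisor)
    return _sdiv(x, divisor)

assert _floor_fixed(15 * 10**9) == 1     # floor(1.5) == 1
assert _floor_fixed(-15 * 10**9) == -2   # floor(-1.5) == -2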
class Ceil(BuiltinFunction):
_id = "ceil"
_inputs = [("value", DecimalT())]
# TODO: maybe use int136?
_return_type = INT256_T
def evaluate(self, node):
validate_call_args(node, 1)
if not isinstance(node.args[0], vy_ast.Decimal):
raise UnfoldableNode
value = math.ceil(node.args[0].value)
return vy_ast.Int.from_node(node, value=value)
@process_inputs
def build_IR(self, expr, args, kwargs, context):
arg = args[0]
with arg.cache_when_complex("arg") as (b1, arg):
ret = IRnode.from_list(
[
"if",
["slt", arg, 0],
["sdiv", arg, DECIMAL_DIVISOR],
["sdiv", ["add", arg, DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],
],
typ=INT256_T,
)
return b1.resolve(ret)
class Convert(BuiltinFunction):
_id = "convert"
def fetch_call_return(self, node):
_, target_typedef = self.infer_arg_types(node)
# note: more type conversion validation happens in convert.py
return target_typedef.typedef
# TODO: push this down into convert.py for more consistency
def infer_arg_types(self, node):
validate_call_args(node, 2)
target_type = type_from_annotation(node.args[1])
value_types = get_possible_types_from_node(node.args[0])
# For `convert` of integer literals, we need to match type inference rules in
# convert.py codegen routines.
# TODO: This can probably be removed once constant folding for `convert` is implemented
if len(value_types) > 1 and all(isinstance(v, IntegerT) for v in value_types):
# Get the smallest (and unsigned if available) type for non-integer target types
# (note this is different from the ordering returned by `get_possible_types_from_node`)
if not isinstance(target_type, IntegerT):
value_types = sorted(value_types, key=lambda v: (v.is_signed, v.bits), reverse=True)
else:
# filter out the target type from list of possible types
value_types = [i for i in value_types if not target_type.compare_type(i)]
value_type = value_types.pop()
# block conversions between same type
if target_type.compare_type(value_type):
raise InvalidType(f"Value and target type are both '{target_type}'", node)
return [value_type, TYPE_T(target_type)]
def build_IR(self, expr, context):
return convert(expr, context)
ADHOC_SLICE_NODE_MACROS = ["~calldata", "~selfcode", "~extcode"]
def _build_adhoc_slice_node(sub: IRnode, start: IRnode, length: IRnode, context: Context) -> IRnode:
assert length.is_literal, "typechecker failed"
assert isinstance(length.value, int) # mypy hint
dst_typ = BytesT(length.value)
# allocate a buffer for the return value
np = context.new_internal_variable(dst_typ)
# `msg.data` by `calldatacopy`
if sub.value == "~calldata":
node = [
"seq",
["assert", ["le", ["add", start, length], "calldatasize"]], # runtime bounds check
["mstore", np, length],
["calldatacopy", np + 32, start, length],
np,
]
# `self.code` by `codecopy`
elif sub.value == "~selfcode":
node = [
"seq",
["assert", ["le", ["add", start, length], "codesize"]], # runtime bounds check
["mstore", np, length],
["codecopy", np + 32, start, length],
np,
]
# `<address>.code` by `extcodecopy`
else:
assert sub.value == "~extcode" and len(sub.args) == 1
node = [
"with",
"_extcode_address",
sub.args[0],
[
"seq",
# runtime bounds check
["assert", ["le", ["add", start, length], ["extcodesize", "_extcode_address"]]],
["mstore", np, length],
["extcodecopy", "_extcode_address", np + 32, start, length],
np,
],
]
assert isinstance(length.value, int) # mypy hint
return IRnode.from_list(node, typ=BytesT(length.value), location=MEMORY)
# note: this and a lot of other builtins could be refactored to accept any uint type
class Slice(BuiltinFunction):
_id = "slice"
_inputs = [
("b", (BYTES32_T, BytesT.any(), StringT.any())),
("start", UINT256_T),
("length", UINT256_T),
]
_return_type = None
def fetch_call_return(self, node):
arg_type, _, _ = self.infer_arg_types(node)
if isinstance(arg_type, StringT):
return_type = StringT()
else:
return_type = BytesT()
# validate start and length are in bounds
arg = node.args[0]
start_expr = node.args[1]
length_expr = node.args[2]
# CMC 2022-03-22 NOTE slight code duplication with semantics/analysis/local
is_adhoc_slice = arg.get("attr") == "code" or (
arg.get("value.id") == "msg" and arg.get("attr") == "data"
)
start_literal = start_expr.value if isinstance(start_expr, vy_ast.Int) else None
length_literal = length_expr.value if isinstance(length_expr, vy_ast.Int) else None
if not is_adhoc_slice:
if length_literal is not None:
if length_literal < 1:
raise ArgumentException("Length cannot be less than 1", length_expr)
if length_literal > arg_type.length:
raise ArgumentException(f"slice out of bounds for {arg_type}", length_expr)
if start_literal is not None:
if start_literal > arg_type.length:
raise ArgumentException(f"slice out of bounds for {arg_type}", start_expr)
if length_literal is not None and start_literal + length_literal > arg_type.length:
raise ArgumentException(f"slice out of bounds for {arg_type}", node)
# we know the length statically
if length_literal is not None:
return_type.set_length(length_literal)
else:
return_type.set_min_length(arg_type.length)
return return_type
def infer_arg_types(self, node):
self._validate_arg_types(node)
# return a concrete type for `b`
b_type = get_possible_types_from_node(node.args[0]).pop()
return [b_type, self._inputs[1][1], self._inputs[2][1]]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
src, start, length = args
# Handle `msg.data`, `self.code`, and `<address>.code`
if src.value in ADHOC_SLICE_NODE_MACROS:
return _build_adhoc_slice_node(src, start, length, context)
is_bytes32 = src.typ == BYTES32_T
if src.location is None:
# it's not a pointer; force it to be one since
# copy_bytes works on pointers.
assert is_bytes32, src
src = ensure_in_memory(src, context)
with src.cache_when_complex("src") as (b1, src), start.cache_when_complex("start") as (
b2,
start,
), length.cache_when_complex("length") as (b3, length):
if is_bytes32:
src_maxlen = 32
else:
src_maxlen = src.typ.maxlen
dst_maxlen = length.value if length.is_literal else src_maxlen
buflen = dst_maxlen
# add 32 bytes to the buffer size bc word access might
# be unaligned (see below)
if src.location == STORAGE:
buflen += 32
# Get returntype string or bytes
assert isinstance(src.typ, _BytestringT) or is_bytes32
# TODO: try to get dst_typ from semantic analysis
if isinstance(src.typ, StringT):
dst_typ = StringT(dst_maxlen)
else:
dst_typ = BytesT(dst_maxlen)
# allocate a buffer for the return value
buf = context.new_internal_variable(BytesT(buflen))
# assign it the correct return type.
# (note mismatch between dst_maxlen and buflen)
dst = IRnode.from_list(buf, typ=dst_typ, location=MEMORY)
dst_data = bytes_data_ptr(dst)
if is_bytes32:
src_len = 32
src_data = src
else:
src_len = get_bytearray_length(src)
src_data = bytes_data_ptr(src)
# general case. byte-for-byte copy
if src.location == STORAGE:
# because slice uses byte-addressing but storage
# is word-aligned, this algorithm starts at some number
# of bytes before the data section starts, and might copy
# an extra word. the pseudocode is:
# dst_data = dst + 32
# copy_dst = dst_data - start % 32
# src_data = src + 32
# copy_src = src_data + (start - start % 32) / 32
# = src_data + (start // 32)
# copy_bytes(copy_dst, copy_src, length)
# //set length AFTER copy because the length word has been clobbered!
# mstore(src, length)
# start at the first word-aligned address before `start`
# e.g. start == byte 7 -> we start copying from byte 0
# start == byte 32 -> we start copying from byte 32
copy_src = IRnode.from_list(
["add", src_data, ["div", start, 32]], location=src.location
)
# e.g. start == byte 0 -> we copy to dst_data + 0
# start == byte 7 -> we copy to dst_data - 7
# start == byte 33 -> we copy to dst_data - 1
copy_dst = IRnode.from_list(
["sub", dst_data, ["mod", start, 32]], location=dst.location
)
# len + (32 if start % 32 > 0 else 0)
copy_len = ["add", length, ["mul", 32, ["iszero", ["iszero", ["mod", start, 32]]]]]
copy_maxlen = buflen
else:
# all other address spaces (mem, calldata, code) we have
# byte-aligned access so we can just do the easy thing,
# memcopy(dst_data, src_data + start, length)
copy_src = add_ofst(src_data, start)
copy_dst = dst_data
copy_len = length
copy_maxlen = buflen
do_copy = copy_bytes(copy_dst, copy_src, copy_len, copy_maxlen)
ret = [
"seq",
# make sure we don't overrun the source buffer
["assert", ["le", ["add", start, length], src_len]], # bounds check
do_copy,
["mstore", dst, length], # set length
dst, # return pointer to dst
]
ret = IRnode.from_list(ret, typ=dst_typ, location=MEMORY)
return b1.resolve(b2.resolve(b3.resolve(ret)))
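# A minimal sketch (illustrative only) of the storage-slice address math from
# the pseudocode above: storage is word-addressed, so the copy starts at the
# word containing `start`, the destination pointer is shifted back by
# start % 32, and the copy length grows by a word when start is unaligned.
def _storage_slice_offsets(dst_data: int, src_data: int, start: int, length: int):
    copy_src = src_data + start // 32        # word index into storage
    copy_dst = dst_data - start % 32         # byte address in memory
    copy_len = length + (32 if start % 32 else 0)
    return copy_src, copy_dst, copy_len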
class Len(BuiltinFunction):
_id = "len"
_inputs = [("b", (StringT.any(), BytesT.any(), DArrayT.any()))]
_return_type = UINT256_T
def evaluate(self, node):
validate_call_args(node, 1)
arg = node.args[0]
if isinstance(arg, (vy_ast.Str, vy_ast.Bytes)):
length = len(arg.value)
elif isinstance(arg, vy_ast.Hex):
# 2 characters represent 1 byte and we subtract 1 to ignore the leading `0x`
length = len(arg.value) // 2 - 1
else:
raise UnfoldableNode
return vy_ast.Int.from_node(node, value=length)
def build_IR(self, node, context):
arg = Expr(node.args[0], context).ir_node
if arg.value == "~calldata":
return IRnode.from_list(["calldatasize"], typ=UINT256_T)
return get_bytearray_length(arg)
class Concat(BuiltinFunction):
_id = "concat"
def fetch_call_return(self, node):
arg_types = self.infer_arg_types(node)
length = 0
for arg_t in arg_types:
length += arg_t.length
if isinstance(arg_types[0], (StringT)):
return_type = StringT()
else:
return_type = BytesT()
return_type.set_length(length)
return return_type
def infer_arg_types(self, node):
if len(node.args) < 2:
raise ArgumentException("Invalid argument count: expected at least 2", node)
if node.keywords:
raise ArgumentException("Keyword arguments are not accepted here", node.keywords[0])
ret = []
prev_typeclass = None
for arg in node.args:
validate_expected_type(arg, (BytesT.any(), StringT.any(), BytesM_T.any()))
arg_t = get_possible_types_from_node(arg).pop()
current_typeclass = "String" if isinstance(arg_t, StringT) else "Bytes"
if prev_typeclass and current_typeclass != prev_typeclass:
raise TypeMismatch(
(
"Concat expects consistent use of string or bytes types, "
"use either string or bytes."
),
arg,
)
prev_typeclass = current_typeclass
ret.append(arg_t)
return ret
def build_IR(self, expr, context):
args = [Expr(arg, context).ir_node for arg in expr.args]
if len(args) < 2:
raise StructureException("Concat expects at least two arguments", expr)
# Maximum length of the output
dst_maxlen = sum(
[arg.typ.maxlen if isinstance(arg.typ, _BytestringT) else arg.typ.m for arg in args]
)
# TODO: try to grab these from semantic analysis
if isinstance(args[0].typ, StringT):
ret_typ = StringT(dst_maxlen)
else:
ret_typ = BytesT(dst_maxlen)
# Node representing the position of the output in memory
dst = IRnode.from_list(
context.new_internal_variable(ret_typ),
typ=ret_typ,
location=MEMORY,
annotation="concat destination",
)
ret = ["seq"]
# stack item representing our current offset in the dst buffer
ofst = "concat_ofst"
# TODO: optimize for the case where all lengths are statically known.
for arg in args:
dst_data = add_ofst(bytes_data_ptr(dst), ofst)
if isinstance(arg.typ, _BytestringT):
# Ignore empty strings
if arg.typ.maxlen == 0:
continue
with arg.cache_when_complex("arg") as (b1, arg):
argdata = bytes_data_ptr(arg)
with get_bytearray_length(arg).cache_when_complex("len") as (b2, arglen):
do_copy = [
"seq",
copy_bytes(dst_data, argdata, arglen, arg.typ.maxlen),
["set", ofst, ["add", ofst, arglen]],
]
ret.append(b1.resolve(b2.resolve(do_copy)))
else:
ret.append(STORE(dst_data, unwrap_location(arg)))
ret.append(["set", ofst, ["add", ofst, arg.typ.m]])
ret.append(STORE(dst, ofst))
# Memory location of the output
ret.append(dst)
return IRnode.from_list(
["with", ofst, 0, ret], typ=ret_typ, location=MEMORY, annotation="concat"
)
class Keccak256(BuiltinFunction):
_id = "keccak256"
# TODO allow any BytesM_T
_inputs = [("value", (BytesT.any(), BYTES32_T, StringT.any()))]
_return_type = BYTES32_T
def evaluate(self, node):
validate_call_args(node, 1)
if isinstance(node.args[0], vy_ast.Bytes):
value = node.args[0].value
elif isinstance(node.args[0], vy_ast.Str):
value = node.args[0].value.encode()
elif isinstance(node.args[0], vy_ast.Hex):
length = len(node.args[0].value) // 2 - 1
value = int(node.args[0].value, 16).to_bytes(length, "big")
else:
raise UnfoldableNode
hash_ = f"0x{keccak256(value).hex()}"
return vy_ast.Hex.from_node(node, value=hash_)
def infer_arg_types(self, node):
self._validate_arg_types(node)
# return a concrete type for `value`
value_type = get_possible_types_from_node(node.args[0]).pop()
return [value_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
assert len(args) == 1
return keccak256_helper(args[0], context)
def _make_sha256_call(inp_start, inp_len, out_start, out_len):
return [
"assert",
[
"staticcall",
["gas"], # gas
SHA256_ADDRESS, # address
inp_start,
inp_len,
out_start,
out_len,
],
]
class Sha256(BuiltinFunction):
_id = "sha256"
_inputs = [("value", (BYTES32_T, BytesT.any(), StringT.any()))]
_return_type = BYTES32_T
def evaluate(self, node):
validate_call_args(node, 1)
if isinstance(node.args[0], vy_ast.Bytes):
value = node.args[0].value
elif isinstance(node.args[0], vy_ast.Str):
value = node.args[0].value.encode()
elif isinstance(node.args[0], vy_ast.Hex):
length = len(node.args[0].value) // 2 - 1
value = int(node.args[0].value, 16).to_bytes(length, "big")
else:
raise UnfoldableNode
hash_ = f"0x{hashlib.sha256(value).hexdigest()}"
return vy_ast.Hex.from_node(node, value=hash_)
def infer_arg_types(self, node):
self._validate_arg_types(node)
# return a concrete type for `value`
value_type = get_possible_types_from_node(node.args[0]).pop()
return [value_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
sub = args[0]
# bytes32 input
if sub.typ == BYTES32_T:
return IRnode.from_list(
[
"seq",
["mstore", MemoryPositions.FREE_VAR_SPACE, sub],
_make_sha256_call(
inp_start=MemoryPositions.FREE_VAR_SPACE,
inp_len=32,
out_start=MemoryPositions.FREE_VAR_SPACE,
out_len=32,
),
["mload", MemoryPositions.FREE_VAR_SPACE], # push value onto stack
],
typ=BYTES32_T,
add_gas_estimate=SHA256_BASE_GAS + 1 * SHA256_PER_WORD_GAS,
)
# bytearray-like input
# special case if it's already in memory
sub = ensure_in_memory(sub, context)
return IRnode.from_list(
[
"with",
"_sub",
sub,
[
"seq",
_make_sha256_call(
# TODO use add_ofst if sub is statically known
inp_start=["add", "_sub", 32],
inp_len=["mload", "_sub"],
out_start=MemoryPositions.FREE_VAR_SPACE,
out_len=32,
),
["mload", MemoryPositions.FREE_VAR_SPACE],
],
],
typ=BYTES32_T,
add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS,
)
class MethodID(FoldedFunction):
_id = "method_id"
def evaluate(self, node):
validate_call_args(node, 1, ["output_type"])
args = node.args
if not isinstance(args[0], vy_ast.Str):
raise InvalidType("method id must be given as a literal string", args[0])
if " " in args[0].value:
raise InvalidLiteral("Invalid function signature - no spaces allowed.")
return_type = self.infer_kwarg_types(node)
value = method_id_int(args[0].value)
if return_type.compare_type(BYTES4_T):
return vy_ast.Hex.from_node(node, value=hex(value))
else:
return vy_ast.Bytes.from_node(node, value=value.to_bytes(4, "big"))
def fetch_call_return(self, node):
validate_call_args(node, 1, ["output_type"])
type_ = self.infer_kwarg_types(node)
return type_
def infer_kwarg_types(self, node):
if node.keywords:
return_type = type_from_annotation(node.keywords[0].value)
if return_type.compare_type(BYTES4_T):
return BYTES4_T
elif isinstance(return_type, BytesT) and return_type.length == 4:
return BytesT(4)
else:
raise ArgumentException("output_type must be Bytes[4] or bytes4", node.keywords[0])
# If `output_type` is not given, default to `Bytes[4]`
return BytesT(4)
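# A minimal sketch (hypothetical helper, not the compiler's own method_id_int)
# of what method_id("...") folds to: the first four bytes of the keccak-256
# hash of the canonical signature, returned as Bytes[4] by default or as
# bytes4 when output_type=bytes4 is given.
from eth_utils import keccak as _keccak

def _method_id(signature: str) -> bytes:
    return _keccak(text=signature)[:4]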
class ECRecover(BuiltinFunction):
_id = "ecrecover"
_inputs = [
("hash", BYTES32_T),
("v", (UINT256_T, UINT8_T)),
("r", (UINT256_T, BYTES32_T)),
("s", (UINT256_T, BYTES32_T)),
]
_return_type = AddressT()
def infer_arg_types(self, node):
self._validate_arg_types(node)
v_t, r_t, s_t = [get_possible_types_from_node(arg).pop() for arg in node.args[1:]]
return [BYTES32_T, v_t, r_t, s_t]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
input_buf = context.new_internal_variable(get_type_for_exact_size(128))
output_buf = context.new_internal_variable(get_type_for_exact_size(32))
return IRnode.from_list(
[
"seq",
# clear output memory first, ecrecover can return 0 bytes
["mstore", output_buf, 0],
["mstore", input_buf, args[0]],
["mstore", input_buf + 32, args[1]],
["mstore", input_buf + 64, args[2]],
["mstore", input_buf + 96, args[3]],
["staticcall", "gas", 1, input_buf, 128, output_buf, 32],
["mload", output_buf],
],
typ=AddressT(),
)
class _ECArith(BuiltinFunction):
@process_inputs
def build_IR(self, expr, _args, kwargs, context):
args_tuple = ir_tuple_from_args(_args)
args_t = args_tuple.typ
input_buf = IRnode.from_list(
context.new_internal_variable(args_t), typ=args_t, location=MEMORY
)
ret_t = self._return_type
ret = ["seq"]
ret.append(make_setter(input_buf, args_tuple))
output_buf = context.new_internal_variable(ret_t)
args_ofst = input_buf
args_len = args_t.memory_bytes_required
out_ofst = output_buf
out_len = ret_t.memory_bytes_required
ret.append(
[
"assert",
["staticcall", ["gas"], self._precompile, args_ofst, args_len, out_ofst, out_len],
]
)
ret.append(output_buf)
return IRnode.from_list(ret, typ=ret_t, location=MEMORY)
class ECAdd(_ECArith):
_id = "ecadd"
_inputs = [("a", SArrayT(UINT256_T, 2)), ("b", SArrayT(UINT256_T, 2))]
_return_type = SArrayT(UINT256_T, 2)
_precompile = 0x6
class ECMul(_ECArith):
_id = "ecmul"
_inputs = [("point", SArrayT(UINT256_T, 2)), ("scalar", UINT256_T)]
_return_type = SArrayT(UINT256_T, 2)
_precompile = 0x7
def _generic_element_getter(op):
def f(index):
return IRnode.from_list(
[op, ["add", "_sub", ["add", 32, ["mul", 32, index]]]], typ=INT128_T
)
return f
def _storage_element_getter(index):
return IRnode.from_list(["sload", ["add", "_sub", ["add", 1, index]]], typ=INT128_T)
class Extract32(BuiltinFunction):
_id = "extract32"
_inputs = [("b", BytesT.any()), ("start", IntegerT.unsigneds())]
# "TYPE_DEFINITION" is a placeholder value for a type definition string, and
# will be replaced by a `TYPE_T` object in `infer_kwarg_types`
# (note that it is ignored in _validate_arg_types)
_kwargs = {"output_type": KwargSettings("TYPE_DEFINITION", BYTES32_T)}
_return_type = None
def fetch_call_return(self, node):
self._validate_arg_types(node)
return_type = self.infer_kwarg_types(node)["output_type"].typedef
return return_type
def infer_arg_types(self, node):
self._validate_arg_types(node)
input_type = get_possible_types_from_node(node.args[0]).pop()
return [input_type, UINT256_T]
def infer_kwarg_types(self, node):
if node.keywords:
output_type = type_from_annotation(node.keywords[0].value)
if not isinstance(output_type, (AddressT, BytesM_T, IntegerT)):
raise InvalidType(
"Output type must be one of integer, bytes32 or address", node.keywords[0].value
)
output_typedef = TYPE_T(output_type)
node.keywords[0].value._metadata["type"] = output_typedef
else:
output_typedef = TYPE_T(BYTES32_T)
return {"output_type": output_typedef}
@process_inputs
def build_IR(self, expr, args, kwargs, context):
sub, index = args
ret_type = kwargs["output_type"]
# Get length and specific element
if sub.location == STORAGE:
lengetter = IRnode.from_list(["sload", "_sub"], typ=INT128_T)
elementgetter = _storage_element_getter
else:
op = sub.location.load_op
lengetter = IRnode.from_list([op, "_sub"], typ=INT128_T)
elementgetter = _generic_element_getter(op)
# TODO rewrite all this with cache_when_complex and bitshifts
# Special case: index known to be a multiple of 32
if isinstance(index.value, int) and not index.value % 32:
o = IRnode.from_list(
[
"with",
"_sub",
sub,
elementgetter(
["div", clamp2(0, index, ["sub", lengetter, 32], signed=True), 32]
),
],
typ=ret_type,
annotation="extracting 32 bytes",
)
# General case
else:
o = IRnode.from_list(
[
"with",
"_sub",
sub,
[
"with",
"_len",
lengetter,
[
"with",
"_index",
clamp2(0, index, ["sub", "_len", 32], signed=True),
[
"with",
"_mi32",
["mod", "_index", 32],
[
"with",
"_di32",
["div", "_index", 32],
[
"if",
"_mi32",
[
"add",
["mul", elementgetter("_di32"), ["exp", 256, "_mi32"]],
[
"div",
elementgetter(["add", "_di32", 1]),
["exp", 256, ["sub", 32, "_mi32"]],
],
],
elementgetter("_di32"),
],
],
],
],
],
],
typ=ret_type,
annotation="extract32",
)
return IRnode.from_list(clamp_basetype(o), typ=ret_type)
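# A minimal sketch (illustrative only) of the word-splicing arithmetic used
# above when the byte index is not 32-byte aligned: the result is assembled
# from the tail of word di32 and the head of word di32 + 1.
def _extract32_unaligned(words: list, index: int) -> int:
    di32, mi32 = divmod(index, 32)
    if mi32 == 0:
        return words[di32]
    hi = (words[di32] * 256**mi32) % 2**256   # EVM MUL wraps mod 2**256
    lo = words[di32 + 1] // 256**(32 - mi32)
    return hi + lo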
class AsWeiValue(BuiltinFunction):
_id = "as_wei_value"
_inputs = [("value", (IntegerT.any(), DecimalT())), ("unit", StringT.any())]
_return_type = UINT256_T
wei_denoms = {
("wei",): 1,
("femtoether", "kwei", "babbage"): 10**3,
("picoether", "mwei", "lovelace"): 10**6,
("nanoether", "gwei", "shannon"): 10**9,
("microether", "szabo"): 10**12,
("milliether", "finney"): 10**15,
("ether",): 10**18,
("kether", "grand"): 10**21,
}
def get_denomination(self, node):
if not isinstance(node.args[1], vy_ast.Str):
raise ArgumentException(
"Wei denomination must be given as a literal string", node.args[1]
)
try:
denom = next(v for k, v in self.wei_denoms.items() if node.args[1].value in k)
except StopIteration:
raise ArgumentException(
f"Unknown denomination: {node.args[1].value}", node.args[1]
) from None
return denom
def evaluate(self, node):
validate_call_args(node, 2)
denom = self.get_denomination(node)
if not isinstance(node.args[0], (vy_ast.Decimal, vy_ast.Int)):
raise UnfoldableNode
value = node.args[0].value
if value < 0:
raise InvalidLiteral("Negative wei value not allowed", node.args[0])
if isinstance(value, int) and value >= 2**256:
raise InvalidLiteral("Value out of range for uint256", node.args[0])
if isinstance(value, Decimal) and value > SizeLimits.MAX_AST_DECIMAL:
raise InvalidLiteral("Value out of range for decimal", node.args[0])
return vy_ast.Int.from_node(node, value=int(value * denom))
def fetch_call_return(self, node):
self.infer_arg_types(node)
return self._return_type
def infer_arg_types(self, node):
self._validate_arg_types(node)
# return a concrete type instead of abstract type
value_type = get_possible_types_from_node(node.args[0]).pop()
unit_type = get_possible_types_from_node(node.args[1]).pop()
return [value_type, unit_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
value = args[0]
denom_divisor = self.get_denomination(expr)
with value.cache_when_complex("value") as (b1, value):
if value.typ in (UINT256_T, UINT8_T):
sub = [
"with",
"ans",
["mul", value, denom_divisor],
[
"seq",
[
"assert",
["or", ["eq", ["div", "ans", value], denom_divisor], ["iszero", value]],
],
"ans",
],
]
elif value.typ == INT128_T:
# signed types do not require bounds checks because the
# largest possible converted value will not overflow 2**256
sub = ["seq", ["assert", ["sgt", value, -1]], ["mul", value, denom_divisor]]
elif value.typ == DecimalT():
sub = [
"seq",
["assert", ["sgt", value, -1]],
["div", ["mul", value, denom_divisor], DECIMAL_DIVISOR],
]
else:
raise CompilerPanic(f"Unexpected type: {value.typ}")
return IRnode.from_list(b1.resolve(sub), typ=UINT256_T)
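# A minimal sketch (illustrative only) of the overflow guard built above for
# unsigned inputs: after ans = value * denom (wrapping mod 2**256), require
# ans // value == denom unless value is zero.
def _checked_mul_uint256(value: int, denom: int) -> int:
    ans = (value * denom) % 2**256
    assert value == 0 or ans // value == denom, "uint256 overflow"
    return ans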
zero_value = IRnode.from_list(0, typ=UINT256_T)
empty_value = IRnode.from_list(0, typ=BYTES32_T)
class RawCall(BuiltinFunction):
_id = "raw_call"
_inputs = [("to", AddressT()), ("data", BytesT.any())]
_kwargs = {
"max_outsize": KwargSettings(UINT256_T, 0, require_literal=True),
"gas": KwargSettings(UINT256_T, "gas"),
"value": KwargSettings(UINT256_T, zero_value),
"is_delegate_call": KwargSettings(BoolT(), False, require_literal=True),
"is_static_call": KwargSettings(BoolT(), False, require_literal=True),
"revert_on_failure": KwargSettings(BoolT(), True, require_literal=True),
}
_return_type = None
def fetch_call_return(self, node):
self._validate_arg_types(node)
kwargz = {i.arg: i.value for i in node.keywords}
outsize = kwargz.get("max_outsize")
revert_on_failure = kwargz.get("revert_on_failure")
revert_on_failure = revert_on_failure.value if revert_on_failure is not None else True
if outsize is None or outsize.value == 0:
if revert_on_failure:
return None
return BoolT()
if not isinstance(outsize, vy_ast.Int) or outsize.value < 0:
raise
if outsize.value:
return_type = BytesT()
return_type.set_min_length(outsize.value)
if revert_on_failure:
return return_type
return TupleT([BoolT(), return_type])
def infer_arg_types(self, node):
self._validate_arg_types(node)
# return a concrete type for `data`
data_type = get_possible_types_from_node(node.args[1]).pop()
return [self._inputs[0][1], data_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
to, data = args
# TODO: must compile in source code order, left-to-right
gas, value, outsize, delegate_call, static_call, revert_on_failure = (
kwargs["gas"],
kwargs["value"],
kwargs["max_outsize"],
kwargs["is_delegate_call"],
kwargs["is_static_call"],
kwargs["revert_on_failure"],
)
if delegate_call and static_call:
raise ArgumentException(
"Call may use one of `is_delegate_call` or `is_static_call`, not both", expr
)
if not static_call and context.is_constant():
raise StateAccessViolation(
f"Cannot make modifying calls from {context.pp_constancy()},"
" use `is_static_call=True` to perform this action",
expr,
)
if data.value == "~calldata":
call_ir = ["with", "mem_ofst", "msize"]
args_ofst = ["seq", ["calldatacopy", "mem_ofst", 0, "calldatasize"], "mem_ofst"]
args_len = "calldatasize"
else:
# some gymnastics to propagate constants (if eval_input_buf
# returns a static memory location)
eval_input_buf = ensure_in_memory(data, context)
input_buf = eval_seq(eval_input_buf)
if input_buf is None:
call_ir = ["with", "arg_buf", eval_input_buf]
input_buf = IRnode.from_list("arg_buf")
else:
call_ir = ["seq", eval_input_buf]
args_ofst = add_ofst(input_buf, 32)
args_len = ["mload", input_buf]
output_node = IRnode.from_list(
context.new_internal_variable(BytesT(outsize)), typ=BytesT(outsize), location=MEMORY
)
bool_ty = BoolT()
# build IR for call or delegatecall
common_call_args = [
args_ofst,
args_len,
# if there is no return value, the return offset can be 0
add_ofst(output_node, 32) if outsize else 0,
outsize,
]
if delegate_call:
call_op = ["delegatecall", gas, to, *common_call_args]
elif static_call:
call_op = ["staticcall", gas, to, *common_call_args]
else:
call_op = ["call", gas, to, value, *common_call_args]
call_ir += [call_op]
# build sequence IR
if outsize:
# return minimum of outsize and returndatasize
size = ["select", ["lt", outsize, "returndatasize"], outsize, "returndatasize"]
# store output size and return output location
store_output_size = ["seq", ["mstore", output_node, size], output_node]
bytes_ty = BytesT(outsize)
if revert_on_failure:
typ = bytes_ty
# check the call success flag, and store returndata in memory
ret_ir = ["seq", check_external_call(call_ir), store_output_size]
return IRnode.from_list(ret_ir, typ=typ, location=MEMORY)
else:
typ = TupleT([bool_ty, bytes_ty])
ret_ir = [
"multi",
# use IRnode.from_list to make sure the types are
# set properly on the "multi" members
IRnode.from_list(call_ir, typ=bool_ty),
IRnode.from_list(store_output_size, typ=bytes_ty, location=MEMORY),
]
# return an IR tuple of call success flag and returndata pointer
return IRnode.from_list(ret_ir, typ=typ)
# max_outsize is 0.
if not revert_on_failure:
# return call flag as stack item
typ = bool_ty
return IRnode.from_list(call_ir, typ=typ)
else:
# check the call success flag and don't return anything
ret_ir = check_external_call(call_ir)
return IRnode.from_list(ret_ir, typ=None)
raise CompilerPanic("unreachable!")
class Send(BuiltinFunction):
_id = "send"
_inputs = [("to", AddressT()), ("value", UINT256_T)]
# default gas stipend is 0
_kwargs = {"gas": KwargSettings(UINT256_T, 0)}
_return_type = None
@process_inputs
def build_IR(self, expr, args, kwargs, context):
to, value = args
gas = kwargs["gas"]
context.check_is_not_constant("send ether", expr)
return IRnode.from_list(
["assert", ["call", gas, to, value, 0, 0, 0, 0]], error_msg="send failed"
)
class SelfDestruct(BuiltinFunction):
_id = "selfdestruct"
_inputs = [("to", AddressT())]
_return_type = None
_is_terminus = True
_warned = False
@process_inputs
def build_IR(self, expr, args, kwargs, context):
if not self._warned:
vyper_warn("`selfdestruct` is deprecated! The opcode is no longer recommended for use.")
self._warned = True
context.check_is_not_constant("selfdestruct", expr)
return IRnode.from_list(
["seq", eval_once_check(_freshname("selfdestruct")), ["selfdestruct", args[0]]]
)
class BlockHash(BuiltinFunction):
_id = "blockhash"
_inputs = [("block_num", UINT256_T)]
_return_type = BYTES32_T
@process_inputs
def build_IR(self, expr, args, kwargs, contact):
return IRnode.from_list(
["blockhash", clamp("lt", clamp("sge", args[0], ["sub", ["number"], 256]), "number")],
typ=BYTES32_T,
)
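# A minimal sketch (illustrative only) of the range clamp applied above:
# blockhash() only returns a usable hash for the 256 most recent blocks, so
# the argument is constrained to number - 256 <= block_num < number.
def _clamp_blockhash_arg(block_num: int, current_block: int) -> int:
    assert block_num >= current_block - 256
    assert block_num < current_block
    return block_num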
class RawRevert(BuiltinFunction):
_id = "raw_revert"
_inputs = [("data", BytesT.any())]
_return_type = None
_is_terminus = True
def fetch_call_return(self, node):
return None
def infer_arg_types(self, node):
self._validate_arg_types(node)
data_type = get_possible_types_from_node(node.args[0]).pop()
return [data_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
with ensure_in_memory(args[0], context).cache_when_complex("err_buf") as (b, buf):
data = bytes_data_ptr(buf)
len_ = get_bytearray_length(buf)
return b.resolve(IRnode.from_list(["revert", data, len_]))
class RawLog(BuiltinFunction):
_id = "raw_log"
_inputs = [("topics", DArrayT(BYTES32_T, 4)), ("data", (BYTES32_T, BytesT.any()))]
def fetch_call_return(self, node):
self.infer_arg_types(node)
def infer_arg_types(self, node):
self._validate_arg_types(node)
if not isinstance(node.args[0], vy_ast.List) or len(node.args[0].elements) > 4:
raise InvalidType("Expecting a list of 0-4 topics as first argument", node.args[0])
# return a concrete type for `data`
data_type = get_possible_types_from_node(node.args[1]).pop()
return [self._inputs[0][1], data_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
topics_length = len(expr.args[0].elements)
topics = args[0].args
# sanity check topics is a literal list
assert args[0].value in ("~empty", "multi")
data = args[1]
if data.typ == BYTES32_T:
placeholder = context.new_internal_variable(BYTES32_T)
return IRnode.from_list(
[
"seq",
# TODO use make_setter
["mstore", placeholder, unwrap_location(data)],
["log" + str(topics_length), placeholder, 32] + topics,
]
)
input_buf = ensure_in_memory(data, context)
return IRnode.from_list(
[
"with",
"_sub",
input_buf,
["log" + str(topics_length), ["add", "_sub", 32], ["mload", "_sub"], *topics],
]
)
class BitwiseAnd(BuiltinFunction):
_id = "bitwise_and"
_inputs = [("x", UINT256_T), ("y", UINT256_T)]
_return_type = UINT256_T
_warned = False
def evaluate(self, node):
if not self.__class__._warned:
vyper_warn("`bitwise_and()` is deprecated! Please use the & operator instead.")
self.__class__._warned = True
validate_call_args(node, 2)
for arg in node.args:
if not isinstance(arg, vy_ast.Int):
raise UnfoldableNode
if arg.value < 0 or arg.value >= 2**256:
raise InvalidLiteral("Value out of range for uint256", arg)
value = node.args[0].value & node.args[1].value
return vy_ast.Int.from_node(node, value=value)
@process_inputs
def build_IR(self, expr, args, kwargs, context):
return IRnode.from_list(["and", args[0], args[1]], typ=UINT256_T)
class BitwiseOr(BuiltinFunction):
_id = "bitwise_or"
_inputs = [("x", UINT256_T), ("y", UINT256_T)]
_return_type = UINT256_T
_warned = False
def evaluate(self, node):
if not self.__class__._warned:
vyper_warn("`bitwise_or()` is deprecated! Please use the | operator instead.")
self.__class__._warned = True
validate_call_args(node, 2)
for arg in node.args:
if not isinstance(arg, vy_ast.Int):
raise UnfoldableNode
if arg.value < 0 or arg.value >= 2**256:
raise InvalidLiteral("Value out of range for uint256", arg)
value = node.args[0].value | node.args[1].value
return vy_ast.Int.from_node(node, value=value)
@process_inputs
def build_IR(self, expr, args, kwargs, context):
return IRnode.from_list(["or", args[0], args[1]], typ=UINT256_T)
class BitwiseXor(BuiltinFunction):
_id = "bitwise_xor"
_inputs = [("x", UINT256_T), ("y", UINT256_T)]
_return_type = UINT256_T
_warned = False
def evaluate(self, node):
if not self.__class__._warned:
vyper_warn("`bitwise_xor()` is deprecated! Please use the ^ operator instead.")
self.__class__._warned = True
validate_call_args(node, 2)
for arg in node.args:
if not isinstance(arg, vy_ast.Int):
raise UnfoldableNode
if arg.value < 0 or arg.value >= 2**256:
raise InvalidLiteral("Value out of range for uint256", arg)
value = node.args[0].value ^ node.args[1].value
return vy_ast.Int.from_node(node, value=value)
@process_inputs
def build_IR(self, expr, args, kwargs, context):
return IRnode.from_list(["xor", args[0], args[1]], typ=UINT256_T)
class BitwiseNot(BuiltinFunction):
_id = "bitwise_not"
_inputs = [("x", UINT256_T)]
_return_type = UINT256_T
_warned = False
def evaluate(self, node):
if not self.__class__._warned:
vyper_warn("`bitwise_not()` is deprecated! Please use the ^ operator instead.")
self.__class__._warned = True
validate_call_args(node, 1)
if not isinstance(node.args[0], vy_ast.Int):
raise UnfoldableNode
value = node.args[0].value
if value < 0 or value >= 2**256:
raise InvalidLiteral("Value out of range for uint256", node.args[0])
value = (2**256 - 1) - value
return vy_ast.Int.from_node(node, value=value)
@process_inputs
def build_IR(self, expr, args, kwargs, context):
return IRnode.from_list(["not", args[0]], typ=UINT256_T)
class Shift(BuiltinFunction):
_id = "shift"
_inputs = [("x", (UINT256_T, INT256_T)), ("_shift_bits", IntegerT.any())]
_return_type = UINT256_T
_warned = False
def evaluate(self, node):
if not self.__class__._warned:
vyper_warn("`shift()` is deprecated! Please use the << or >> operator instead.")
self.__class__._warned = True
validate_call_args(node, 2)
if [i for i in node.args if not isinstance(i, vy_ast.Int)]:
raise UnfoldableNode
value, shift = [i.value for i in node.args]
if value < 0 or value >= 2**256:
raise InvalidLiteral("Value out of range for uint256", node.args[0])
if shift < -256 or shift > 256:
# this validation is performed to prevent the compiler from hanging
# rather than for correctness because the post-folded constant would
# have been validated anyway
raise InvalidLiteral("Shift must be between -256 and 256", node.args[1])
if shift < 0:
value = value >> -shift
else:
value = (value << shift) % (2**256)
return vy_ast.Int.from_node(node, value=value)
def fetch_call_return(self, node):
# return type is the type of the first argument
return self.infer_arg_types(node)[0]
def infer_arg_types(self, node):
self._validate_arg_types(node)
# return a concrete type instead of SignedIntegerAbstractType
arg_ty = get_possible_types_from_node(node.args[0])[0]
shift_ty = get_possible_types_from_node(node.args[1])[0]
return [arg_ty, shift_ty]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
# "gshr" -- generalized right shift
argty = args[0].typ
GSHR = sar if argty.is_signed else shr
with args[0].cache_when_complex("to_shift") as (b1, arg), args[1].cache_when_complex(
"bits"
) as (b2, bits):
neg_bits = ["sub", 0, bits]
ret = ["if", ["slt", bits, 0], GSHR(neg_bits, arg), shl(bits, arg)]
return b1.resolve(b2.resolve(IRnode.from_list(ret, typ=argty)))
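# Illustrative semantics of the generalized shift above: shift(x, -3) lowers to a
# right shift by 3 (sar for signed x, shr otherwise), while shift(x, 3) lowers to
# shl by 3; at fold time, shift(1, 255) becomes 2**255 and shift(1, 256) becomes 0.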
class _AddMulMod(BuiltinFunction):
_inputs = [("a", UINT256_T), ("b", UINT256_T), ("c", UINT256_T)]
_return_type = UINT256_T
def evaluate(self, node):
validate_call_args(node, 3)
if isinstance(node.args[2], vy_ast.Int) and node.args[2].value == 0:
raise ZeroDivisionException("Modulo by 0", node.args[2])
for arg in node.args:
if not isinstance(arg, vy_ast.Int):
raise UnfoldableNode
if arg.value < 0 or arg.value >= 2**256:
raise InvalidLiteral("Value out of range for uint256", arg)
value = self._eval_fn(node.args[0].value, node.args[1].value) % node.args[2].value
return vy_ast.Int.from_node(node, value=value)
@process_inputs
def build_IR(self, expr, args, kwargs, context):
x, y, z = args
with x.cache_when_complex("x") as (b1, x):
with y.cache_when_complex("y") as (b2, y):
with z.cache_when_complex("z") as (b3, z):
ret = IRnode.from_list(
["seq", ["assert", z], [self._opcode, x, y, z]], typ=UINT256_T
)
return b1.resolve(b2.resolve(b3.resolve(ret)))
class AddMod(_AddMulMod):
_id = "uint256_addmod"
_eval_fn = operator.add
_opcode = "addmod"
class MulMod(_AddMulMod):
_id = "uint256_mulmod"
_eval_fn = operator.mul
_opcode = "mulmod"
class PowMod256(BuiltinFunction):
_id = "pow_mod256"
_inputs = [("a", UINT256_T), ("b", UINT256_T)]
_return_type = UINT256_T
def evaluate(self, node):
validate_call_args(node, 2)
if next((i for i in node.args if not isinstance(i, vy_ast.Int)), None):
raise UnfoldableNode
left, right = node.args
if left.value < 0 or right.value < 0:
raise UnfoldableNode
value = pow(left.value, right.value, 2**256)
return vy_ast.Int.from_node(node, value=value)
def build_IR(self, expr, context):
left = Expr.parse_value_expr(expr.args[0], context)
right = Expr.parse_value_expr(expr.args[1], context)
return IRnode.from_list(["exp", left, right], typ=left.typ)
class Abs(BuiltinFunction):
_id = "abs"
_inputs = [("value", INT256_T)]
_return_type = INT256_T
def evaluate(self, node):
validate_call_args(node, 1)
if not isinstance(node.args[0], vy_ast.Int):
raise UnfoldableNode
value = node.args[0].value
if not SizeLimits.MIN_INT256 <= value <= SizeLimits.MAX_INT256:
raise OverflowException("Literal is outside of allowable range for int256")
value = abs(value)
if not SizeLimits.MIN_INT256 <= value <= SizeLimits.MAX_INT256:
raise OverflowException("Absolute literal value is outside allowable range for int256")
return vy_ast.Int.from_node(node, value=value)
def build_IR(self, expr, context):
value = Expr.parse_value_expr(expr.args[0], context)
sub = [
"with",
"orig",
value,
[
"if",
["slt", "orig", 0],
# clamp orig != -2**255 (because it maps to itself under negation)
["seq", ["assert", ["ne", "orig", ["sub", 0, "orig"]]], ["sub", 0, "orig"]],
"orig",
],
]
return IRnode.from_list(sub, typ=INT256_T)
# CREATE* functions
# create helper functions
# generates CREATE op sequence + zero check for result
def _create_ir(value, buf, length, salt=None, checked=True):
args = [value, buf, length]
create_op = "create"
if salt is not None:
create_op = "create2"
args.append(salt)
ret = IRnode.from_list(
["seq", eval_once_check(_freshname("create_builtin")), [create_op, *args]]
)
if not checked:
return ret
ret = clamp_nonzero(ret)
ret.set_error_msg(f"{create_op} failed")
return ret
# calculate the gas used by create for a given number of bytes
def _create_addl_gas_estimate(size, should_use_create2):
ret = 200 * size
if should_use_create2:
ret += SHA3_PER_WORD * ceil32(size) // 32
return ret
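# Worked example (illustrative): for 1000 bytes of deployed code the estimate is
# 200 * 1000 = 200_000 gas of code-deposit cost; with create2, the hashing surcharge
# adds SHA3_PER_WORD gas for each of the ceil32(1000) // 32 == 32 hashed words.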
def eip1167_bytecode():
# NOTE cyclic import?
from vyper.ir.compile_ir import assembly_to_evm
loader_asm = [
"PUSH1",
0x2D,
"RETURNDATASIZE",
"DUP2",
"PUSH1",
0x09,
"RETURNDATASIZE",
"CODECOPY",
"RETURN",
]
forwarder_pre_asm = [
"CALLDATASIZE",
"RETURNDATASIZE",
"RETURNDATASIZE",
"CALLDATACOPY",
"RETURNDATASIZE",
"RETURNDATASIZE",
"RETURNDATASIZE",
"CALLDATASIZE",
"RETURNDATASIZE",
"PUSH20", # [address to delegate to]
]
forwarder_post_asm = [
"GAS",
"DELEGATECALL",
"RETURNDATASIZE",
"DUP3",
"DUP1",
"RETURNDATACOPY",
"SWAP1",
"RETURNDATASIZE",
"SWAP2",
"PUSH1",
0x2B, # jumpdest of whole program.
"JUMPI",
"REVERT",
"JUMPDEST",
"RETURN",
]
return (
assembly_to_evm(loader_asm)[0],
assembly_to_evm(forwarder_pre_asm)[0],
assembly_to_evm(forwarder_post_asm)[0],
)
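# CreateMinimalProxyTo below stitches these fragments together in memory as
#   loader | forwarder_pre | <20-byte target address> | forwarder_post
# which is the standard EIP-1167 minimal proxy initcode layout.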
# "standard" initcode for code which can be larger than 256 bytes.
# returns the code starting from 0x0b with len `codesize`.
# NOTE: it assumes codesize <= 2**24.
def _create_preamble(codesize):
from vyper.ir.compile_ir import assembly_to_evm
evm_len = 0x0B # 11 bytes
asm = [
# use PUSH3 to be able to deal with larger contracts
"PUSH3",
# blank space for codesize
0x00,
0x00,
0x00,
"RETURNDATASIZE",
"DUP2",
"PUSH1",
evm_len,
"RETURNDATASIZE",
"CODECOPY",
"RETURN",
]
evm = assembly_to_evm(asm)[0]
assert len(evm) == evm_len, evm
shl_bits = (evm_len - 4) * 8 # codesize needs to go right after the PUSH3
# mask codesize into the aforementioned "blank space"
return ["or", bytes_to_int(evm), shl(shl_bits, codesize)], evm_len
class _CreateBase(BuiltinFunction):
_kwargs = {
"value": KwargSettings(UINT256_T, zero_value),
"salt": KwargSettings(BYTES32_T, empty_value),
}
_return_type = AddressT()
@process_inputs
def build_IR(self, expr, args, kwargs, context):
# errmsg something like "Cannot use {self._id} in pure fn"
context.check_is_not_constant("use {self._id}", expr)
should_use_create2 = "salt" in [kwarg.arg for kwarg in expr.keywords]
if not should_use_create2:
kwargs["salt"] = None
ir_builder = self._build_create_IR(expr, args, context, **kwargs)
add_gas_estimate = self._add_gas_estimate(args, should_use_create2)
return IRnode.from_list(
ir_builder, typ=AddressT(), annotation=self._id, add_gas_estimate=add_gas_estimate
)
class CreateMinimalProxyTo(_CreateBase):
# create an EIP1167 "minimal proxy" to the target contract
_id = "create_minimal_proxy_to"
_inputs = [("target", AddressT())]
def _add_gas_estimate(self, args, should_use_create2):
a, b, c = eip1167_bytecode()
bytecode_len = 20 + len(b) + len(c)
return _create_addl_gas_estimate(bytecode_len, should_use_create2)
def _build_create_IR(self, expr, args, context, value, salt):
target_address = args[0]
buf = context.new_internal_variable(BytesT(96))
loader_evm, forwarder_pre_evm, forwarder_post_evm = eip1167_bytecode()
# Adjust to 32-byte boundaries
preamble_length = len(loader_evm) + len(forwarder_pre_evm)
forwarder_preamble = bytes_to_int(
loader_evm + forwarder_pre_evm + b"\x00" * (32 - preamble_length)
)
forwarder_post = bytes_to_int(forwarder_post_evm + b"\x00" * (32 - len(forwarder_post_evm)))
# left-align the target
if target_address.is_literal:
# note: should move to optimizer once we have
# codesize optimization pipeline
aligned_target = args[0].value << 96
else:
aligned_target = shl(96, target_address)
buf_len = preamble_length + 20 + len(forwarder_post_evm)
return [
"seq",
["mstore", buf, forwarder_preamble],
["mstore", ["add", buf, preamble_length], aligned_target],
["mstore", ["add", buf, preamble_length + 20], forwarder_post],
_create_ir(value, buf, buf_len, salt=salt),
]
class CreateForwarderTo(CreateMinimalProxyTo):
_warned = False
def build_IR(self, expr, context):
if not self._warned:
vyper_warn("`create_forwarder_to` is a deprecated alias of `create_minimal_proxy_to`!")
self._warned = True
return super().build_IR(expr, context)
class CreateCopyOf(_CreateBase):
_id = "create_copy_of"
_inputs = [("target", AddressT())]
@property
def _preamble_len(self):
return 11
def _add_gas_estimate(self, args, should_use_create2):
# max possible runtime length + preamble length
return _create_addl_gas_estimate(EIP_170_LIMIT + self._preamble_len, should_use_create2)
def _build_create_IR(self, expr, args, context, value, salt):
target = args[0]
with target.cache_when_complex("create_target") as (b1, target):
codesize = IRnode.from_list(["extcodesize", target])
msize = IRnode.from_list(["msize"])
with codesize.cache_when_complex("target_codesize") as (
b2,
codesize,
), msize.cache_when_complex("mem_ofst") as (b3, mem_ofst):
ir = ["seq"]
# make sure there is actually code at the target
check_codesize = ["assert", codesize]
ir.append(
IRnode.from_list(check_codesize, error_msg="empty target (create_copy_of)")
)
# store the preamble at msize + 22 (zero padding)
preamble, preamble_len = _create_preamble(codesize)
assert preamble_len == self._preamble_len
ir.append(["mstore", mem_ofst, preamble])
# copy the target code into memory. current layout:
# msize | 00...00 (22 0's) | preamble | bytecode
ir.append(["extcodecopy", target, add_ofst(mem_ofst, 32), 0, codesize])
buf = add_ofst(mem_ofst, 32 - preamble_len)
buf_len = ["add", codesize, preamble_len]
ir.append(_create_ir(value, buf, buf_len, salt))
return b1.resolve(b2.resolve(b3.resolve(ir)))
class CreateFromBlueprint(_CreateBase):
_id = "create_from_blueprint"
_inputs = [("target", AddressT())]
_kwargs = {
"value": KwargSettings(UINT256_T, zero_value),
"salt": KwargSettings(BYTES32_T, empty_value),
"raw_args": KwargSettings(BoolT(), False, require_literal=True),
"code_offset": KwargSettings(UINT256_T, zero_value),
}
_has_varargs = True
def _add_gas_estimate(self, args, should_use_create2):
ctor_args = ir_tuple_from_args(args[1:])
# max possible size of init code
maxlen = EIP_170_LIMIT + ctor_args.typ.abi_type.size_bound()
return _create_addl_gas_estimate(maxlen, should_use_create2)
def _build_create_IR(self, expr, args, context, value, salt, code_offset, raw_args):
target = args[0]
ctor_args = args[1:]
ctor_args = [ensure_in_memory(arg, context) for arg in ctor_args]
if raw_args:
if len(ctor_args) != 1 or not isinstance(ctor_args[0].typ, BytesT):
raise StructureException("raw_args must be used with exactly 1 bytes argument")
argbuf = bytes_data_ptr(ctor_args[0])
argslen = get_bytearray_length(ctor_args[0])
bufsz = ctor_args[0].typ.maxlen
else:
# encode the varargs
to_encode = ir_tuple_from_args(ctor_args)
# pretend we allocated enough memory for the encoder
# (we didn't, but we are clobbering unused memory so it's safe.)
bufsz = to_encode.typ.abi_type.size_bound()
argbuf = IRnode.from_list(
context.new_internal_variable(get_type_for_exact_size(bufsz)), location=MEMORY
)
# return a complex expression which writes to memory and returns
# the length of the encoded data
argslen = abi_encode(argbuf, to_encode, context, bufsz=bufsz, returns_len=True)
# NOTE: we need to invoke the abi encoder before evaluating MSIZE,
# then copy the abi encoded buffer to past-the-end of the initcode
# (since the abi encoder could write to fresh memory).
# it would be good to not require the memory copy, but need
# to evaluate memory safety.
with target.cache_when_complex("create_target") as (b1, target), argslen.cache_when_complex(
"encoded_args_len"
) as (b2, encoded_args_len), code_offset.cache_when_complex("code_ofst") as (b3, codeofst):
codesize = IRnode.from_list(["sub", ["extcodesize", target], codeofst])
# copy code to memory starting from msize. we are clobbering
# unused memory so it's safe.
msize = IRnode.from_list(["msize"], location=MEMORY)
with codesize.cache_when_complex("target_codesize") as (
b4,
codesize,
), msize.cache_when_complex("mem_ofst") as (b5, mem_ofst):
ir = ["seq"]
# make sure there is code at the target, and that
# code_ofst <= (extcodesize target).
# (note if code_ofst > (extcodesize target), would be
# OOG on the EXTCODECOPY)
# (code_ofst == (extcodesize target) would be empty
# initcode, which we disallow for hygiene reasons -
# same as `create_copy_of` on an empty target).
check_codesize = ["assert", ["sgt", codesize, 0]]
ir.append(
IRnode.from_list(
check_codesize, error_msg="empty target (create_from_blueprint)"
)
)
# copy the target code into memory.
# layout starting from mem_ofst:
# 00...00 (22 0's) | preamble | bytecode
ir.append(["extcodecopy", target, mem_ofst, codeofst, codesize])
ir.append(copy_bytes(add_ofst(mem_ofst, codesize), argbuf, encoded_args_len, bufsz))
# theoretically, dst = "msize", but just be safe.
# if len(ctor_args) > 0:
# dst = add_ofst(mem_ofst, codesize)
# encoded_args_len = self._encode_args(dst, ctor_args, context)
# else:
# encoded_args_len = 0
length = ["add", codesize, encoded_args_len]
ir.append(_create_ir(value, mem_ofst, length, salt))
return b1.resolve(b2.resolve(b3.resolve(b4.resolve(b5.resolve(ir)))))
class _UnsafeMath(BuiltinFunction):
# TODO add unsafe math for `decimal`s
_inputs = [("a", IntegerT.any()), ("b", IntegerT.any())]
def __repr__(self):
return f"builtin function unsafe_{self.op}"
def fetch_call_return(self, node):
return_type = self.infer_arg_types(node).pop()
return return_type
def infer_arg_types(self, node):
self._validate_arg_types(node)
types_list = get_common_types(*node.args, filter_fn=lambda x: isinstance(x, IntegerT))
if not types_list:
raise TypeMismatch(f"unsafe_{self.op} called on dislike types", node)
type_ = types_list.pop()
return [type_, type_]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
(a, b) = args
op = self.op
assert a.typ == b.typ, "unreachable"
otyp = a.typ
if op == "div" and a.typ.is_signed:
op = "sdiv"
ret = [op, a, b]
if a.typ.bits < 256:
# wrap for ops which could under/overflow
if a.typ.is_signed:
# e.g. int128 -> (signextend 15 (add x y))
ret = promote_signed_int(ret, a.typ.bits)
else:
# e.g. uint8 -> (mod (add x y) 256)
# TODO mod_bound could be a really large literal
ret = ["mod", ret, 2**a.typ.bits]
return IRnode.from_list(ret, typ=otyp)
# TODO handle decimal case
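# Wrapping examples (illustrative): for uint8, unsafe_add(255, 1) lowers to
# (mod (add 255 1) 256) == 0; for int8, the signextend above re-wraps the raw
# result into [-128, 127], so unsafe_add(127, 1) yields -128.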
class UnsafeAdd(_UnsafeMath):
op = "add"
class UnsafeSub(_UnsafeMath):
op = "sub"
class UnsafeMul(_UnsafeMath):
op = "mul"
class UnsafeDiv(_UnsafeMath):
op = "div"
class _MinMax(BuiltinFunction):
_inputs = [("a", (DecimalT(), IntegerT.any())), ("b", (DecimalT(), IntegerT.any()))]
def evaluate(self, node):
validate_call_args(node, 2)
if not isinstance(node.args[0], type(node.args[1])):
raise UnfoldableNode
if not isinstance(node.args[0], (vy_ast.Decimal, vy_ast.Int)):
raise UnfoldableNode
left, right = (i.value for i in node.args)
if isinstance(left, Decimal) and (
min(left, right) < SizeLimits.MIN_AST_DECIMAL
or max(left, right) > SizeLimits.MAX_AST_DECIMAL
):
raise InvalidType("Decimal value is outside of allowable range", node)
types_list = get_common_types(
*node.args, filter_fn=lambda x: isinstance(x, (IntegerT, DecimalT))
)
if not types_list:
raise TypeMismatch("Cannot perform action between dislike numeric types", node)
value = self._eval_fn(left, right)
return type(node.args[0]).from_node(node, value=value)
def fetch_call_return(self, node):
return_type = self.infer_arg_types(node).pop()
return return_type
def infer_arg_types(self, node):
self._validate_arg_types(node)
types_list = get_common_types(
*node.args, filter_fn=lambda x: isinstance(x, (IntegerT, DecimalT))
)
if not types_list:
raise TypeMismatch("Cannot perform action between dislike numeric types", node)
type_ = types_list.pop()
return [type_, type_]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
op = self._opcode
with args[0].cache_when_complex("_l") as (b1, left), args[1].cache_when_complex("_r") as (
b2,
right,
):
if left.typ == right.typ:
if left.typ != UINT256_T:
# if comparing like types that are not uint256, use SLT or SGT
op = f"s{op}"
o = ["select", [op, left, right], left, right]
otyp = left.typ
else:
raise TypeMismatch(f"Minmax types incompatible: {left.typ.typ} {right.typ.typ}")
return IRnode.from_list(b1.resolve(b2.resolve(o)), typ=otyp)
class Min(_MinMax):
_id = "min"
_eval_fn = min
_opcode = "lt"
class Max(_MinMax):
_id = "max"
_eval_fn = max
_opcode = "gt"
class Uint2Str(BuiltinFunction):
_id = "uint2str"
_inputs = [("x", IntegerT.unsigneds())]
def fetch_call_return(self, node):
arg_t = self.infer_arg_types(node)[0]
bits = arg_t.bits
len_needed = math.ceil(bits * math.log(2) / math.log(10))
return StringT(len_needed)
def evaluate(self, node):
validate_call_args(node, 1)
if not isinstance(node.args[0], vy_ast.Int):
raise UnfoldableNode
value = str(node.args[0].value)
return vy_ast.Str.from_node(node, value=value)
def infer_arg_types(self, node):
self._validate_arg_types(node)
input_type = get_possible_types_from_node(node.args[0]).pop()
return [input_type]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
return_t = self.fetch_call_return(expr)
n_digits = return_t.maxlen
with args[0].cache_when_complex("val") as (b1, val):
buf = context.new_internal_variable(return_t)
i = IRnode.from_list(context.fresh_varname("uint2str_i"), typ=UINT256_T)
ret = ["repeat", i, 0, n_digits + 1, n_digits + 1]
body = [
"seq",
[
"if",
["eq", val, 0],
# clobber val, and return it as a pointer
[
"seq",
["mstore", ["sub", buf + n_digits, i], i],
["set", val, ["sub", buf + n_digits, i]],
"break",
],
[
"seq",
["mstore", ["sub", buf + n_digits, i], ["add", 48, ["mod", val, 10]]],
["set", val, ["div", val, 10]],
],
],
]
ret.append(body)
# "0" has hex representation 0x00..0130..00
# if (val == 0) {
# return "0"
# } else {
# do the loop
# }
ret = [
"if",
["eq", val, 0],
["seq", ["mstore", buf + 1, ord("0")], ["mstore", buf, 1], buf],
["seq", ret, val],
]
return b1.resolve(IRnode.from_list(ret, location=MEMORY, typ=return_t))
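# How the loop above produces a String in place (illustrative): digits are written
# right-to-left, each landing one byte further left in the buffer; once val reaches
# zero, the digit count i is stored in the word immediately preceding the digits and
# that word's address is returned, so the result is a normally length-prefixed String.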
class Sqrt(BuiltinFunction):
_id = "sqrt"
_inputs = [("d", DecimalT())]
_return_type = DecimalT()
@process_inputs
def build_IR(self, expr, args, kwargs, context):
# TODO fix cyclic dependency with codegen/stmt.py
from ._utils import generate_inline_function
arg = args[0]
# TODO: reify decimal and integer sqrt paths (see isqrt)
sqrt_code = """
assert x >= 0.0
z: decimal = 0.0
if x == 0.0:
z = 0.0
else:
z = x / 2.0 + 0.5
y: decimal = x
for i in range(256):
if z == y:
break
y = z
z = (x / z + z) / 2.0
"""
x_type = DecimalT()
placeholder_copy = ["pass"]
# Steal current position if variable is already allocated.
if arg.value == "mload":
new_var_pos = arg.args[0]
# Other locations need to be copied.
else:
new_var_pos = context.new_internal_variable(x_type)
placeholder_copy = ["mstore", new_var_pos, arg]
# Create input variables.
variables = {"x": VariableRecord(name="x", pos=new_var_pos, typ=x_type, mutable=False)}
# Dictionary to update new (i.e. typecheck) namespace
variables_2 = {"x": VarInfo(DecimalT())}
# Generate inline IR.
new_ctx, sqrt_ir = generate_inline_function(
code=sqrt_code,
variables=variables,
variables_2=variables_2,
memory_allocator=context.memory_allocator,
)
return IRnode.from_list(
["seq", placeholder_copy, sqrt_ir, new_ctx.vars["z"].pos], # load x variable
typ=DecimalT(),
location=MEMORY,
)
class ISqrt(BuiltinFunction):
_id = "isqrt"
_inputs = [("d", UINT256_T)]
_return_type = UINT256_T
@process_inputs
def build_IR(self, expr, args, kwargs, context):
# calculate isqrt using the babylonian method
y, z = "y", "z"
arg = args[0]
with arg.cache_when_complex("x") as (b1, x):
ret = [
"seq",
[
"if",
["ge", y, 2 ** (128 + 8)],
["seq", ["set", y, shr(128, y)], ["set", z, shl(64, z)]],
],
[
"if",
["ge", y, 2 ** (64 + 8)],
["seq", ["set", y, shr(64, y)], ["set", z, shl(32, z)]],
],
[
"if",
["ge", y, 2 ** (32 + 8)],
["seq", ["set", y, shr(32, y)], ["set", z, shl(16, z)]],
],
[
"if",
["ge", y, 2 ** (16 + 8)],
["seq", ["set", y, shr(16, y)], ["set", z, shl(8, z)]],
],
]
ret.append(["set", z, ["div", ["mul", z, ["add", y, 2**16]], 2**18]])
for _ in range(7):
ret.append(["set", z, ["div", ["add", ["div", x, z], z], 2]])
# note: If ``x+1`` is a perfect square, then the Babylonian
# algorithm oscillates between floor(sqrt(x)) and ceil(sqrt(x)) in
# consecutive iterations. return the floor value always.
ret.append(["with", "t", ["div", x, z], ["select", ["lt", z, "t"], z, "t"]])
ret = ["with", y, x, ["with", z, 181, ret]]
return b1.resolve(IRnode.from_list(ret, typ=UINT256_T))
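# Pure-Python reference model of the IR above (a sketch for illustration only; it is
# not used by codegen and assumes the same floor semantics noted in the comment on
# perfect squares):
def _isqrt_reference_model(x: int) -> int:
    assert 0 <= x < 2**256
    if x == 0:
        return 0
    y, z = x, 181
    # coarse initial estimate, mirroring the shift ladder in the IR
    if y >= 2 ** (128 + 8):
        y >>= 128
        z <<= 64
    if y >= 2 ** (64 + 8):
        y >>= 64
        z <<= 32
    if y >= 2 ** (32 + 8):
        y >>= 32
        z <<= 16
    if y >= 2 ** (16 + 8):
        y >>= 16
        z <<= 8
    z = (z * (y + 2**16)) >> 18
    # seven Newton/Babylonian iterations, matching the unrolled IR loop
    for _ in range(7):
        z = (x // z + z) // 2
    # always return the floor value (see the oscillation note above)
    return min(z, x // z)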
class Empty(TypenameFoldedFunction):
_id = "empty"
def fetch_call_return(self, node):
type_ = self.infer_arg_types(node)[0].typedef
if isinstance(type_, HashMapT):
raise TypeMismatch("Cannot use empty on HashMap", node)
return type_
@process_inputs
def build_IR(self, expr, args, kwargs, context):
output_type = args[0]
return IRnode("~empty", typ=output_type)
class Breakpoint(BuiltinFunction):
_id = "breakpoint"
_inputs: list = []
_warned = False
def fetch_call_return(self, node):
if not self._warned:
vyper_warn("`breakpoint` should only be used for debugging!\n" + node._annotated_source)
self._warned = True
return None
@process_inputs
def build_IR(self, expr, args, kwargs, context):
return IRnode.from_list("breakpoint", annotation="breakpoint()")
class Print(BuiltinFunction):
_id = "print"
_inputs: list = []
_has_varargs = True
_kwargs = {"hardhat_compat": KwargSettings(BoolT(), False, require_literal=True)}
_warned = False
def fetch_call_return(self, node):
if not self._warned:
vyper_warn("`print` should only be used for debugging!\n" + node._annotated_source)
self._warned = True
return None
@process_inputs
def build_IR(self, expr, args, kwargs, context):
args_as_tuple = ir_tuple_from_args(args)
args_abi_t = args_as_tuple.typ.abi_type
# create a signature like "log(uint256)"
sig = "log" + "(" + ",".join([arg.typ.abi_type.selector_name() for arg in args]) + ")"
if kwargs["hardhat_compat"] is True:
method_id = method_id_int(sig)
buflen = 32 + args_abi_t.size_bound()
# 32 bytes extra space for the method id
buf = context.new_internal_variable(get_type_for_exact_size(buflen))
ret = ["seq"]
ret.append(["mstore", buf, method_id])
encode = abi_encode(buf + 32, args_as_tuple, context, buflen, returns_len=True)
else:
method_id = method_id_int("log(string,bytes)")
schema = args_abi_t.selector_name().encode("utf-8")
if len(schema) > 32:
raise CompilerPanic("print signature too long: {schema}")
schema_t = StringT(len(schema))
schema_buf = context.new_internal_variable(schema_t)
ret = ["seq"]
ret.append(["mstore", schema_buf, len(schema)])
# TODO use Expr.make_bytelike, or better have a `bytestring` IRnode type
ret.append(["mstore", schema_buf + 32, bytes_to_int(schema.ljust(32, b"\x00"))])
payload_buflen = args_abi_t.size_bound()
payload_t = BytesT(payload_buflen)
# 32 bytes extra space for the method id
payload_buf = context.new_internal_variable(payload_t)
encode_payload = abi_encode(
payload_buf + 32, args_as_tuple, context, payload_buflen, returns_len=True
)
ret.append(["mstore", payload_buf, encode_payload])
args_as_tuple = ir_tuple_from_args(
[
IRnode.from_list(schema_buf, typ=schema_t, location=MEMORY),
IRnode.from_list(payload_buf, typ=payload_t, location=MEMORY),
]
)
# add 32 for method id padding
buflen = 32 + args_as_tuple.typ.abi_type.size_bound()
buf = context.new_internal_variable(get_type_for_exact_size(buflen))
ret.append(["mstore", buf, method_id])
encode = abi_encode(buf + 32, args_as_tuple, context, buflen, returns_len=True)
# debug address that tooling uses
CONSOLE_ADDRESS = 0x000000000000000000636F6E736F6C652E6C6F67
ret.append(["staticcall", "gas", CONSOLE_ADDRESS, buf + 28, ["add", 4, encode], 0, 0])
return IRnode.from_list(ret, annotation="print:" + sig)
class ABIEncode(BuiltinFunction):
_id = "_abi_encode" # TODO prettier to rename this to abi.encode
# signature: *, ensure_tuple=<literal_bool> -> Bytes[<calculated len>]
# (check the signature manually since we have no utility methods
# to handle varargs.)
# explanation of ensure_tuple:
# default is to force even a single value into a tuple,
# e.g. _abi_encode(bytes) -> _abi_encode((bytes,))
# _abi_encode((bytes,)) -> _abi_encode(((bytes,),))
# this follows the encoding convention for functions:
# https://docs.soliditylang.org/en/v0.8.6/abi-spec.html#function-selector-and-argument-encoding
# if this is turned off, then bytes will be encoded as bytes.
_inputs: list = []
_has_varargs = True
_kwargs = {
"ensure_tuple": KwargSettings(BoolT(), True, require_literal=True),
"method_id": KwargSettings((BYTES4_T, BytesT(4)), None, require_literal=True),
}
def infer_kwarg_types(self, node):
ret = {}
for kwarg in node.keywords:
kwarg_name = kwarg.arg
validate_expected_type(kwarg.value, self._kwargs[kwarg_name].typ)
ret[kwarg_name] = get_exact_type_from_node(kwarg.value)
return ret
def fetch_call_return(self, node):
self._validate_arg_types(node)
ensure_tuple = next(
(arg.value.value for arg in node.keywords if arg.arg == "ensure_tuple"), True
)
assert isinstance(ensure_tuple, bool)
has_method_id = "method_id" in [arg.arg for arg in node.keywords]
# figure out the output type by converting
# the types to ABI_Types and calling size_bound API
arg_abi_types = []
arg_types = self.infer_arg_types(node)
for arg_t in arg_types:
arg_abi_types.append(arg_t.abi_type)
# special case, no tuple
if len(arg_abi_types) == 1 and not ensure_tuple:
arg_abi_t = arg_abi_types[0]
else:
arg_abi_t = ABI_Tuple(arg_abi_types)
maxlen = arg_abi_t.size_bound()
if has_method_id:
# the output includes 4 bytes for the method_id.
maxlen += 4
ret = BytesT()
ret.set_length(maxlen)
return ret
@staticmethod
def _parse_method_id(method_id_literal):
if method_id_literal is None:
return None
if isinstance(method_id_literal, bytes):
assert len(method_id_literal) == 4
return fourbytes_to_int(method_id_literal)
if method_id_literal.startswith("0x"):
method_id_literal = method_id_literal[2:]
return fourbytes_to_int(bytes.fromhex(method_id_literal))
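# e.g. the ERC-20 transfer selector 0xa9059cbb, given either as b"\xa9\x05\x9c\xbb"
# or as the hex string "0xa9059cbb", parses to the integer 2835717307.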
@process_inputs
def build_IR(self, expr, args, kwargs, context):
ensure_tuple = kwargs["ensure_tuple"]
method_id = self._parse_method_id(kwargs["method_id"])
if len(args) < 1:
raise StructureException("abi_encode expects at least one argument", expr)
# figure out the required length for the output buffer
if len(args) == 1 and not ensure_tuple:
# special case, no tuple
encode_input = args[0]
else:
encode_input = ir_tuple_from_args(args)
input_abi_t = encode_input.typ.abi_type
maxlen = input_abi_t.size_bound()
if method_id is not None:
maxlen += 4
buf_t = BytesT(maxlen)
assert self.fetch_call_return(expr).length == maxlen
buf = context.new_internal_variable(buf_t)
ret = ["seq"]
if method_id is not None:
# <32 bytes length> | <4 bytes method_id> | <everything else>
# write the unaligned method_id first, then we will
# overwrite the 28 bytes of zeros with the bytestring length
ret += [["mstore", buf + 4, method_id]]
# abi encode, and grab length as stack item
length = abi_encode(buf + 36, encode_input, context, returns_len=True, bufsz=maxlen)
# write the output length to where bytestring stores its length
ret += [["mstore", buf, ["add", length, 4]]]
else:
# abi encode and grab length as stack item
length = abi_encode(buf + 32, encode_input, context, returns_len=True, bufsz=maxlen)
# write the output length to where bytestring stores its length
ret += [["mstore", buf, length]]
# return the buf location
# TODO location is statically known, optimize this out
ret += [buf]
return IRnode.from_list(ret, location=MEMORY, typ=buf_t)
class ABIDecode(BuiltinFunction):
_id = "_abi_decode"
_inputs = [("data", BytesT.any()), ("output_type", "TYPE_DEFINITION")]
_kwargs = {"unwrap_tuple": KwargSettings(BoolT(), True, require_literal=True)}
def fetch_call_return(self, node):
_, output_type = self.infer_arg_types(node)
return output_type.typedef
def infer_arg_types(self, node):
validate_call_args(node, 2, ["unwrap_tuple"])
data_type = get_exact_type_from_node(node.args[0])
output_typedef = TYPE_T(type_from_annotation(node.args[1]))
return [data_type, output_typedef]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
unwrap_tuple = kwargs["unwrap_tuple"]
data = args[0]
output_typ = args[1]
wrapped_typ = output_typ
if unwrap_tuple is True:
wrapped_typ = calculate_type_for_external_return(output_typ)
abi_size_bound = wrapped_typ.abi_type.size_bound()
abi_min_size = wrapped_typ.abi_type.min_size()
# Get the size of data
input_max_len = data.typ.maxlen
assert abi_min_size <= abi_size_bound, "bad abi type"
if input_max_len < abi_size_bound:
raise StructureException(
(
"Mismatch between size of input and size of decoded types. "
f"length of ABI-encoded {wrapped_typ} must be equal to or greater "
f"than {abi_size_bound}"
),
expr.args[0],
)
data = ensure_in_memory(data, context)
with data.cache_when_complex("to_decode") as (b1, data):
data_ptr = bytes_data_ptr(data)
data_len = get_bytearray_length(data)
# Normally, ABI-encoded data assumes the argument is a tuple
# (See comments for `wrap_value_for_external_return`)
# However, we do not want to use `wrap_value_for_external_return`
# technique as used in external call codegen because in order to be
# type-safe we would need an extra memory copy. To avoid a copy,
# we manually add the ABI-dynamic offset so that it is
# re-interpreted in-place.
if (
unwrap_tuple is True
and needs_external_call_wrap(output_typ)
and output_typ.abi_type.is_dynamic()
):
data_ptr = add_ofst(data_ptr, 32)
ret = ["seq"]
if abi_min_size == abi_size_bound:
ret.append(["assert", ["eq", abi_min_size, data_len]])
else:
# runtime assert: abi_min_size <= data_len <= abi_size_bound
ret.append(clamp2(abi_min_size, data_len, abi_size_bound, signed=False))
# return pointer to the buffer
ret.append(data_ptr)
return b1.resolve(
IRnode.from_list(
ret,
typ=output_typ,
location=data.location,
encoding=Encoding.ABI,
annotation=f"abi_decode({output_typ})",
)
)
class _MinMaxValue(TypenameFoldedFunction):
def evaluate(self, node):
self._validate_arg_types(node)
input_type = type_from_annotation(node.args[0])
if not isinstance(input_type, (IntegerT, DecimalT)):
raise InvalidType(f"Expected numeric type but got {input_type} instead", node)
val = self._eval(input_type)
if isinstance(input_type, DecimalT):
ret = vy_ast.Decimal.from_node(node, value=val)
if isinstance(input_type, IntegerT):
ret = vy_ast.Int.from_node(node, value=val)
# TODO: to change to known_type once #3213 is merged
ret._metadata["type"] = input_type
return ret
class MinValue(_MinMaxValue):
_id = "min_value"
def _eval(self, type_):
return type_.ast_bounds[0]
class MaxValue(_MinMaxValue):
_id = "max_value"
def _eval(self, type_):
return type_.ast_bounds[1]
class Epsilon(TypenameFoldedFunction):
_id = "epsilon"
def evaluate(self, node):
self._validate_arg_types(node)
input_type = type_from_annotation(node.args[0])
if not input_type.compare_type(DecimalT()):
raise InvalidType(f"Expected decimal type but got {input_type} instead", node)
return vy_ast.Decimal.from_node(node, value=input_type.epsilon)
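# e.g. epsilon(decimal) folds to the smallest representable decimal increment
# (1e-10 for Vyper's ten-decimal-place fixed-point type).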
DISPATCH_TABLE = {
"_abi_encode": ABIEncode(),
"_abi_decode": ABIDecode(),
"floor": Floor(),
"ceil": Ceil(),
"convert": Convert(),
"slice": Slice(),
"len": Len(),
"concat": Concat(),
"sha256": Sha256(),
"method_id": MethodID(),
"keccak256": Keccak256(),
"ecrecover": ECRecover(),
"ecadd": ECAdd(),
"ecmul": ECMul(),
"extract32": Extract32(),
"as_wei_value": AsWeiValue(),
"raw_call": RawCall(),
"blockhash": BlockHash(),
"bitwise_and": BitwiseAnd(),
"bitwise_or": BitwiseOr(),
"bitwise_xor": BitwiseXor(),
"bitwise_not": BitwiseNot(),
"uint256_addmod": AddMod(),
"uint256_mulmod": MulMod(),
"unsafe_add": UnsafeAdd(),
"unsafe_sub": UnsafeSub(),
"unsafe_mul": UnsafeMul(),
"unsafe_div": UnsafeDiv(),
"pow_mod256": PowMod256(),
"uint2str": Uint2Str(),
"isqrt": ISqrt(),
"sqrt": Sqrt(),
"shift": Shift(),
"create_minimal_proxy_to": CreateMinimalProxyTo(),
"create_forwarder_to": CreateForwarderTo(),
"create_copy_of": CreateCopyOf(),
"create_from_blueprint": CreateFromBlueprint(),
"min": Min(),
"max": Max(),
"empty": Empty(),
"abs": Abs(),
"min_value": MinValue(),
"max_value": MaxValue(),
"epsilon": Epsilon(),
}
STMT_DISPATCH_TABLE = {
"send": Send(),
"print": Print(),
"breakpoint": Breakpoint(),
"selfdestruct": SelfDestruct(),
"raw_call": RawCall(),
"raw_log": RawLog(),
"raw_revert": RawRevert(),
"create_minimal_proxy_to": CreateMinimalProxyTo(),
"create_forwarder_to": CreateForwarderTo(),
"create_copy_of": CreateCopyOf(),
"create_from_blueprint": CreateFromBlueprint(),
}
BUILTIN_FUNCTIONS = {**STMT_DISPATCH_TABLE, **DISPATCH_TABLE}.keys()
def get_builtin_functions():
return {**STMT_DISPATCH_TABLE, **DISPATCH_TABLE}
|
GHSA-c647-pxm2-c52w
|
vyper/codegen/ir_node.py
|
@@ -1,3 +1,4 @@
+import contextlib
import re
from enum import Enum, auto
from functools import cached_property
@@ -46,6 +47,77 @@ class Encoding(Enum):
# future: packed
+# shortcut for chaining multiple cache_when_complex calls
+# CMC 2023-08-10 remove this and scope_together _as soon as_ we have
+# real variables in IR (that we can declare without explicit scoping -
+# needs liveness analysis).
+@contextlib.contextmanager
+def scope_multi(ir_nodes, names):
+ assert len(ir_nodes) == len(names)
+
+ builders = []
+ scoped_ir_nodes = []
+
+ class _MultiBuilder:
+ def resolve(self, body):
+ # sanity check that it's initialized properly
+ assert len(builders) == len(ir_nodes)
+ ret = body
+ for b in reversed(builders):
+ ret = b.resolve(ret)
+ return ret
+
+ mb = _MultiBuilder()
+
+ with contextlib.ExitStack() as stack:
+ for arg, name in zip(ir_nodes, names):
+ b, ir_node = stack.enter_context(arg.cache_when_complex(name))
+
+ builders.append(b)
+ scoped_ir_nodes.append(ir_node)
+
+ yield mb, scoped_ir_nodes
+
+
+# create multiple with scopes if any of the items are complex, to force
+# ordering of side effects.
+@contextlib.contextmanager
+def scope_together(ir_nodes, names):
+ assert len(ir_nodes) == len(names)
+
+ should_scope = any(s._optimized.is_complex_ir for s in ir_nodes)
+
+ class _Builder:
+ def resolve(self, body):
+ if not should_scope:
+ # uses of the variable have already been inlined
+ return body
+
+ ret = body
+ # build with scopes from inside-out (hence reversed)
+ for arg, name in reversed(list(zip(ir_nodes, names))):
+ ret = ["with", name, arg, ret]
+
+ if isinstance(body, IRnode):
+ return IRnode.from_list(
+ ret, typ=body.typ, location=body.location, encoding=body.encoding
+ )
+ else:
+ return ret
+
+ b = _Builder()
+
+ if should_scope:
+ ir_vars = tuple(
+ IRnode.from_list(name, typ=arg.typ, location=arg.location, encoding=arg.encoding)
+ for (arg, name) in zip(ir_nodes, names)
+ )
+ yield b, ir_vars
+ else:
+ # inline them
+ yield b, ir_nodes
+
+
# this creates a magical block which maps to IR `with`
class _WithBuilder:
def __init__(self, ir_node, name, should_inline=False):
@@ -326,14 +398,15 @@ def _check(condition, err):
def gas(self):
return self._gas + self.add_gas_estimate
- # the IR should be cached.
- # TODO make this private. turns out usages are all for the caching
- # idiom that cache_when_complex addresses
+ # the IR should be cached and/or evaluated exactly once
@property
def is_complex_ir(self):
# list of items not to cache. note can add other env variables
# which do not change, e.g. calldatasize, coinbase, etc.
- do_not_cache = {"~empty", "calldatasize"}
+ # reads (from memory or storage) should not be cached because
+ # they can have or be affected by side effects.
+ do_not_cache = {"~empty", "calldatasize", "callvalue"}
+
return (
isinstance(self.value, str)
and (self.value.lower() in VALID_IR_MACROS or self.value.upper() in get_ir_opcodes())
|
import re
from enum import Enum, auto
from functools import cached_property
from typing import Any, List, Optional, Tuple, Union
from vyper.compiler.settings import VYPER_COLOR_OUTPUT
from vyper.evm.address_space import AddrSpace
from vyper.evm.opcodes import get_ir_opcodes
from vyper.exceptions import CodegenPanic, CompilerPanic
from vyper.semantics.types import VyperType
from vyper.utils import VALID_IR_MACROS, ceil32
# Set default string representation for ints in IR output.
AS_HEX_DEFAULT = False
if VYPER_COLOR_OUTPUT:
OKBLUE = "\033[94m"
OKMAGENTA = "\033[35m"
OKLIGHTMAGENTA = "\033[95m"
OKLIGHTBLUE = "\033[94m"
ENDC = "\033[0m"
else:
OKBLUE = ""
OKMAGENTA = ""
OKLIGHTMAGENTA = ""
OKLIGHTBLUE = ""
ENDC = ""
class NullAttractor(int):
def __add__(self, other: int) -> "NullAttractor":
return NullAttractor()
def __repr__(self) -> str:
return "None"
__radd__ = __add__
__mul__ = __add__
class Encoding(Enum):
# vyper encoding, default for memory variables
VYPER = auto()
# abi encoded, default for args/return values from external funcs
ABI = auto()
# future: packed
# this creates a magical block which maps to IR `with`
class _WithBuilder:
def __init__(self, ir_node, name, should_inline=False):
if should_inline and ir_node._optimized.is_complex_ir:
# this can only mean trouble
raise CompilerPanic("trying to inline a complex IR node")
self.ir_node = ir_node
# whether or not to inline the ir_node
self.should_inline = should_inline
# a named IR variable which represents the
# output of `ir_node`
self.ir_var = IRnode.from_list(
name, typ=ir_node.typ, location=ir_node.location, encoding=ir_node.encoding
)
def __enter__(self):
if self.should_inline:
# return the value instead of the named variable
# so it can be inlined
return self, self.ir_node
else:
# return the named variable
return self, self.ir_var
def __exit__(self, *args):
pass
# MUST be called at the end of building the expression
# in order to make sure the expression gets wrapped correctly
def resolve(self, body):
if self.should_inline:
# uses of the variable have already been inlined
return body
ret = ["with", self.ir_var, self.ir_node, body]
if isinstance(body, IRnode):
return IRnode.from_list(
ret, typ=body.typ, location=body.location, encoding=body.encoding
)
else:
return ret
# Data structure for IR parse tree
class IRnode:
repr_show_gas = False
_gas: int
valency: int
args: List["IRnode"]
value: Union[str, int]
def __init__(
self,
value: Union[str, int],
args: List["IRnode"] = None,
typ: VyperType = None,
location: Optional[AddrSpace] = None,
source_pos: Optional[Tuple[int, int]] = None,
annotation: Optional[str] = None,
error_msg: Optional[str] = None,
mutable: bool = True,
add_gas_estimate: int = 0,
encoding: Encoding = Encoding.VYPER,
):
if args is None:
args = []
self.value = value
self.args = args
# TODO remove this sanity check once mypy is more thorough
assert isinstance(typ, VyperType) or typ is None, repr(typ)
self.typ = typ
self.location = location
self.source_pos = source_pos
self.error_msg = error_msg
self.annotation = annotation
self.mutable = mutable
self.add_gas_estimate = add_gas_estimate
self.encoding = encoding
self.as_hex = AS_HEX_DEFAULT
def _check(condition, err):
if not condition:
raise CompilerPanic(str(err))
_check(self.value is not None, "None is not allowed as IRnode value")
# Determine this node's valency (1 if it pushes a value on the stack,
# 0 otherwise) and checks to make sure the number and valencies of
# children are correct. Also, find an upper bound on gas consumption
# Numbers
if isinstance(self.value, int):
_check(len(self.args) == 0, "int can't have arguments")
# integers must be in the range (MIN_INT256, MAX_UINT256)
_check(-(2**255) <= self.value < 2**256, "out of range")
self.valency = 1
self._gas = 5
elif isinstance(self.value, bytes):
# a literal bytes value, probably inside a "data" node.
_check(len(self.args) == 0, "bytes can't have arguments")
self.valency = 0
self._gas = 0
elif isinstance(self.value, str):
# Opcodes and pseudo-opcodes (e.g. clamp)
if self.value.upper() in get_ir_opcodes():
_, ins, outs, gas = get_ir_opcodes()[self.value.upper()]
self.valency = outs
_check(
len(self.args) == ins,
f"Number of arguments mismatched: {self.value} {self.args}",
)
# We add 2 per stack height at push time and take it back
# at pop time; this makes `break` easier to handle
self._gas = gas + 2 * (outs - ins)
for arg in self.args:
# pop and pass are used to push/pop values on the stack to be
# consumed for internal functions, therefore we whitelist this as a zero valency
# allowed argument.
zero_valency_whitelist = {"pass", "pop"}
_check(
arg.valency == 1 or arg.value in zero_valency_whitelist,
f"invalid argument to `{self.value}`: {arg}",
)
self._gas += arg.gas
# Dynamic gas cost: 8 gas for each byte of logging data
if self.value.upper()[0:3] == "LOG" and isinstance(self.args[1].value, int):
self._gas += self.args[1].value * 8
# Dynamic gas cost: non-zero-valued call
if self.value.upper() == "CALL" and self.args[2].value != 0:
self._gas += 34000
# Dynamic gas cost: filling sstore (ie. not clearing)
elif self.value.upper() == "SSTORE" and self.args[1].value != 0:
self._gas += 15000
# Dynamic gas cost: calldatacopy
elif self.value.upper() in ("CALLDATACOPY", "CODECOPY", "EXTCODECOPY"):
size = 34000
size_arg_index = 3 if self.value.upper() == "EXTCODECOPY" else 2
size_arg = self.args[size_arg_index]
if isinstance(size_arg.value, int):
size = size_arg.value
self._gas += ceil32(size) // 32 * 3
# Gas limits in call
if self.value.upper() == "CALL" and isinstance(self.args[0].value, int):
self._gas += self.args[0].value
# If statements
elif self.value == "if":
if len(self.args) == 3:
self._gas = self.args[0].gas + max(self.args[1].gas, self.args[2].gas) + 3
if len(self.args) == 2:
self._gas = self.args[0].gas + self.args[1].gas + 17
_check(
self.args[0].valency > 0,
f"zerovalent argument as a test to an if statement: {self.args[0]}",
)
_check(len(self.args) in (2, 3), "if statement can only have 2 or 3 arguments")
self.valency = self.args[1].valency
# With statements: with <var> <initial> <statement>
elif self.value == "with":
_check(len(self.args) == 3, self)
_check(
len(self.args[0].args) == 0 and isinstance(self.args[0].value, str),
f"first argument to with statement must be a variable name: {self.args[0]}",
)
_check(
self.args[1].valency == 1 or self.args[1].value == "pass",
f"zerovalent argument to with statement: {self.args[1]}",
)
self.valency = self.args[2].valency
self._gas = sum([arg.gas for arg in self.args]) + 5
# Repeat statements: repeat <index_name> <startval> <rounds> <rounds_bound> <body>
elif self.value == "repeat":
_check(
len(self.args) == 5, "repeat(index_name, startval, rounds, rounds_bound, body)"
)
counter_ptr = self.args[0]
start = self.args[1]
repeat_count = self.args[2]
repeat_bound = self.args[3]
body = self.args[4]
_check(
isinstance(repeat_bound.value, int) and repeat_bound.value > 0,
f"repeat bound must be a compile-time positive integer: {self.args[2]}",
)
_check(repeat_count.valency == 1, repeat_count)
_check(counter_ptr.valency == 1, counter_ptr)
_check(start.valency == 1, start)
self.valency = 0
self._gas = counter_ptr.gas + start.gas
self._gas += 3 # gas for repeat_bound
int_bound = int(repeat_bound.value)
self._gas += int_bound * (body.gas + 50) + 30
if repeat_count != repeat_bound:
# gas for assert(repeat_count <= repeat_bound)
self._gas += 18
# Seq statements: seq <statement> <statement> ...
elif self.value == "seq":
self.valency = self.args[-1].valency if self.args else 0
self._gas = sum([arg.gas for arg in self.args]) + 30
# GOTO is a jump with args
# e.g. (goto my_label x y z) will push x y and z onto the stack,
# then JUMP to my_label.
elif self.value in ("goto", "exit_to"):
for arg in self.args:
_check(
arg.valency == 1 or arg.value == "pass",
f"zerovalent argument to goto {arg}",
)
self.valency = 0
self._gas = sum([arg.gas for arg in self.args])
elif self.value == "label":
_check(
self.args[1].value == "var_list",
f"2nd argument to label must be var_list, {self}",
)
_check(len(args) == 3, f"label should have 3 args but has {len(args)}, {self}")
self.valency = 0
self._gas = 1 + sum(t.gas for t in self.args)
elif self.value == "unique_symbol":
# a label which enforces uniqueness, and does not appear
# in generated bytecode. this is useful for generating
# internal assertions that a particular IR fragment only
# occurs a single time in a program. note that unique_symbol
# must be distinct from all `unique_symbol`s AS WELL AS all
# `label`s, otherwise IR-to-assembly will raise an exception.
self.valency = 0
self._gas = 0
# var_list names a variable number stack variables
elif self.value == "var_list":
for arg in self.args:
if not isinstance(arg.value, str) or len(arg.args) > 0:
raise CodegenPanic(f"var_list only takes strings: {self.args}")
self.valency = 0
self._gas = 0
# Multi statements: multi <expr> <expr> ...
elif self.value == "multi":
for arg in self.args:
_check(
arg.valency > 0, f"Multi expects all children to not be zerovalent: {arg}"
)
self.valency = sum([arg.valency for arg in self.args])
self._gas = sum([arg.gas for arg in self.args])
elif self.value == "deploy":
self.valency = 0
_check(len(self.args) == 3, f"`deploy` should have three args {self}")
self._gas = NullAttractor() # unknown
# Stack variables
else:
self.valency = 1
self._gas = 3
elif self.value is None:
self.valency = 1
# None IRnodes always get compiled into something else, e.g.
# mzero or PUSH1 0, and the gas will get re-estimated then.
self._gas = 3
else:
raise CompilerPanic(f"Invalid value for IR AST node: {self.value}")
assert isinstance(self.args, list)
# TODO would be nice to rename to `gas_estimate` or `gas_bound`
@property
def gas(self):
return self._gas + self.add_gas_estimate
# the IR should be cached.
# TODO make this private. turns out usages are all for the caching
# idiom that cache_when_complex addresses
@property
def is_complex_ir(self):
# list of items not to cache. note can add other env variables
# which do not change, e.g. calldatasize, coinbase, etc.
do_not_cache = {"~empty", "calldatasize"}
return (
isinstance(self.value, str)
and (self.value.lower() in VALID_IR_MACROS or self.value.upper() in get_ir_opcodes())
and self.value.lower() not in do_not_cache
)
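# e.g. ["mload", ptr] is complex IR and gets cached into a with-variable, while a
# bare "calldatasize" or an integer literal is cheap/idempotent and is inlined.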
# set an error message and push down into all children.
# useful for overriding an error message generated by a helper
# function with a more specific error message.
def set_error_msg(self, error_msg: str) -> None:
self.error_msg = error_msg
for arg in self.args:
arg.set_error_msg(error_msg)
# get the unique symbols contained in this node, which provides
# sanity check invariants for the optimizer.
# cache because it's a perf hotspot. note that this (and other cached
# properties!) can get borked if `self.args` are mutated in such a way
# which changes the child `.unique_symbols`. in the future it would
# be good to tighten down the hatches so it is harder to modify
# IRnode member variables.
@cached_property
def unique_symbols(self):
ret = set()
if self.value == "unique_symbol":
ret.add(self.args[0].value)
children = self.args
if self.value == "deploy":
children = [self.args[0], self.args[2]]
for arg in children:
s = arg.unique_symbols
non_uniques = ret.intersection(s)
assert len(non_uniques) == 0, f"non-unique symbols {non_uniques}"
ret |= s
return ret
@property
def is_literal(self):
return isinstance(self.value, int) or self.value == "multi"
@property
def is_pointer(self):
# not used yet but should help refactor/clarify downstream code
# eventually
return self.location is not None
@property # probably could be cached_property but be paranoid
def _optimized(self):
# TODO figure out how to fix this circular import
from vyper.ir.optimizer import optimize
return optimize(self)
# This function is slightly confusing but abstracts a common pattern:
# when an IR value needs to be computed once and then cached as an
# IR value (if it is expensive, or more importantly if its computation
# includes side-effects), cache it as an IR variable named with the
# `name` param, and execute the `body` with the cached value. Otherwise,
# run the `body` without caching the IR variable.
# Note that this may be an unneeded abstraction in the presence of an
# arbitrarily powerful optimization framework (which can detect unneeded
# caches) but for now still necessary - CMC 2021-12-11.
# usage:
# ```
# with ir_node.cache_when_complex("foo") as builder, foo:
# ret = some_function(foo)
# return builder.resolve(ret)
# ```
def cache_when_complex(self, name):
# for caching purposes, see if the ir_node will be optimized
# because a non-literal expr could turn into a literal,
# (e.g. `(add 1 2)`)
# TODO this could really be moved into optimizer.py
should_inline = not self._optimized.is_complex_ir
return _WithBuilder(self, name, should_inline)
@cached_property
def referenced_variables(self):
ret = set()
for arg in self.args:
ret |= arg.referenced_variables
ret |= getattr(self, "_referenced_variables", set())
return ret
@cached_property
def contains_self_call(self):
return getattr(self, "is_self_call", False) or any(x.contains_self_call for x in self.args)
def __getitem__(self, i):
return self.to_list()[i]
def __len__(self):
return len(self.to_list())
# TODO this seems like a not useful and also confusing function
# check if dead code and remove - CMC 2021-12-13
def to_list(self):
return [self.value] + [a.to_list() for a in self.args]
def __eq__(self, other):
return (
self.value == other.value
and self.args == other.args
and self.typ == other.typ
and self.location == other.location
and self.source_pos == other.source_pos
and self.annotation == other.annotation
and self.mutable == other.mutable
and self.add_gas_estimate == other.add_gas_estimate
and self.valency == other.valency
)
@property
def repr_value(self):
if isinstance(self.value, int) and self.as_hex:
return hex(self.value)
if not isinstance(self.value, str):
return str(self.value)
return self.value
@staticmethod
def _colorise_keywords(val):
if val.lower() in VALID_IR_MACROS: # highlight macro
return OKLIGHTMAGENTA + val + ENDC
elif val.upper() in get_ir_opcodes().keys():
return OKMAGENTA + val + ENDC
return val
def repr(self) -> str:
if not len(self.args):
if self.annotation:
return f"{self.repr_value} " + OKLIGHTBLUE + f"<{self.annotation}>" + ENDC
else:
return str(self.repr_value)
# x = repr(self.to_list())
# if len(x) < 80:
# return x
o = ""
if self.annotation:
o += f"/* {self.annotation} */ \n"
if self.repr_show_gas and self.gas:
o += OKBLUE + "{" + ENDC + str(self.gas) + OKBLUE + "} " + ENDC # add gas for info.
o += "[" + self._colorise_keywords(self.repr_value)
prev_lineno = self.source_pos[0] if self.source_pos else None
arg_lineno = None
annotated = False
has_inner_newlines = False
for arg in self.args:
o += ",\n "
arg_lineno = arg.source_pos[0] if arg.source_pos else None
if arg_lineno is not None and arg_lineno != prev_lineno and self.value in ("seq", "if"):
o += f"# Line {(arg_lineno)}\n "
prev_lineno = arg_lineno
annotated = True
arg_repr = arg.repr()
if "\n" in arg_repr:
has_inner_newlines = True
sub = arg_repr.replace("\n", "\n ").strip(" ")
o += self._colorise_keywords(sub)
output = o.rstrip(" ") + "]"
output_on_one_line = re.sub(r",\n *", ", ", output).replace("\n", "")
should_output_single_line = (
(len(output_on_one_line) < 80 or len(self.args) == 1) and not annotated
) and not has_inner_newlines
if should_output_single_line:
return output_on_one_line
else:
return output
def __repr__(self):
return self.repr()
@classmethod
def from_list(
cls,
obj: Any,
typ: VyperType = None,
location: Optional[AddrSpace] = None,
source_pos: Optional[Tuple[int, int]] = None,
annotation: Optional[str] = None,
error_msg: Optional[str] = None,
mutable: bool = True,
add_gas_estimate: int = 0,
encoding: Encoding = Encoding.VYPER,
) -> "IRnode":
if isinstance(typ, str):
raise CompilerPanic(f"Expected type, not string: {typ}")
if isinstance(obj, IRnode):
# note: this modify-and-return clause is a little weird since
# the input gets modified. CC 20191121.
if typ is not None:
obj.typ = typ
if obj.source_pos is None:
obj.source_pos = source_pos
if obj.location is None:
obj.location = location
if obj.encoding is None:
obj.encoding = encoding
if obj.error_msg is None:
obj.error_msg = error_msg
return obj
elif not isinstance(obj, list):
return cls(
obj,
[],
typ,
location=location,
annotation=annotation,
mutable=mutable,
add_gas_estimate=add_gas_estimate,
source_pos=source_pos,
encoding=encoding,
error_msg=error_msg,
)
else:
return cls(
obj[0],
[cls.from_list(o, source_pos=source_pos) for o in obj[1:]],
typ,
location=location,
annotation=annotation,
mutable=mutable,
source_pos=source_pos,
add_gas_estimate=add_gas_estimate,
encoding=encoding,
error_msg=error_msg,
)
|
GHSA-c647-pxm2-c52w
|
airflow/www/auth.py
|
@@ -38,7 +38,8 @@ def decorated(*args, **kwargs):
appbuilder = current_app.appbuilder
dag_id = (
- request.args.get("dag_id")
+ kwargs.get("dag_id")
+ or request.args.get("dag_id")
or request.form.get("dag_id")
or (request.is_json and request.json.get("dag_id"))
or None
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import wraps
from typing import Callable, Sequence, TypeVar, cast
from flask import current_app, flash, g, redirect, render_template, request, url_for
from airflow.configuration import conf
from airflow.utils.net import get_hostname
T = TypeVar("T", bound=Callable)
def has_access(permissions: Sequence[tuple[str, str]] | None = None) -> Callable[[T], T]:
"""Factory for decorator that checks current user's permissions against required permissions."""
def requires_access_decorator(func: T):
@wraps(func)
def decorated(*args, **kwargs):
__tracebackhide__ = True # Hide from pytest traceback.
appbuilder = current_app.appbuilder
dag_id = (
request.args.get("dag_id")
or request.form.get("dag_id")
or (request.is_json and request.json.get("dag_id"))
or None
)
if appbuilder.sm.check_authorization(permissions, dag_id):
return func(*args, **kwargs)
elif not g.user.is_anonymous and not g.user.perms:
return (
render_template(
"airflow/no_roles_permissions.html",
hostname=get_hostname()
if conf.getboolean("webserver", "EXPOSE_HOSTNAME")
else "redact",
logout_url=appbuilder.get_url_for_logout,
),
403,
)
else:
access_denied = "Access is Denied"
flash(access_denied, "danger")
return redirect(
url_for(
appbuilder.sm.auth_view.__class__.__name__ + ".login",
next=request.url,
)
)
return cast(T, decorated)
return requires_access_decorator
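# Editor's note: illustrative sketch only, not part of the original module.
# In this pre-patch version dag_id is read from the query string, form body,
# or JSON body only. RESTful routes such as /dags/<dag_id>/graph pass dag_id
# as a URL path parameter, i.e. as a keyword argument of the view function,
# so here it resolves to None and check_authorization runs without a specific
# DAG id, which is the bypass addressed by the patch above. A minimal sketch
# of the patched lookup order (hypothetical helper name, assuming Flask's
# `request` object is available as in the module above):
#
#   def _resolve_dag_id(view_kwargs):
#       return (
#           view_kwargs.get("dag_id")
#           or request.args.get("dag_id")
#           or request.form.get("dag_id")
#           or (request.is_json and request.json.get("dag_id"))
#           or None
#       )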
|
GHSA-2h84-3crq-vgfj
|
tests/www/views/test_views_tasks.py
|
@@ -369,6 +369,20 @@ def test_graph_trigger_origin_graph_view(app, admin_client):
check_content_in_response(href, resp)
+def test_graph_view_without_dag_permission(app, one_dag_perm_user_client):
+ url = "/dags/example_bash_operator/graph"
+ resp = one_dag_perm_user_client.get(url, follow_redirects=True)
+ assert resp.status_code == 200
+ assert resp.request.url == "http://localhost/dags/example_bash_operator/graph"
+ check_content_in_response("example_bash_operator", resp)
+
+ url = "/dags/example_xcom/graph"
+ resp = one_dag_perm_user_client.get(url, follow_redirects=True)
+ assert resp.status_code == 200
+ assert resp.request.url == "http://localhost/home"
+ check_content_in_response("Access is Denied", resp)
+
+
def test_dag_details_trigger_origin_dag_details_view(app, admin_client):
app.dag_bag.get_dag("test_graph_view").create_dagrun(
run_type=DagRunType.SCHEDULED,
@@ -581,6 +595,39 @@ def per_dag_perm_user_client(app, new_dag_to_delete):
delete_roles(app)
+@pytest.fixture()
+def one_dag_perm_user_client(app):
+ username = "test_user_one_dag_perm"
+ dag_id = "example_bash_operator"
+ sm = app.appbuilder.sm
+ perm = f"{permissions.RESOURCE_DAG_PREFIX}{dag_id}"
+
+ sm.create_permission(permissions.ACTION_CAN_READ, perm)
+
+ create_user(
+ app,
+ username=username,
+ role_name="User with permission to access only one dag",
+ permissions=[
+ (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
+ (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
+ (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
+ (permissions.ACTION_CAN_READ, perm),
+ ],
+ )
+
+ sm.find_user(username=username)
+
+ yield client_with_login(
+ app,
+ username=username,
+ password=username,
+ )
+
+ delete_user(app, username=username) # type: ignore
+ delete_roles(app)
+
+
def test_delete_just_dag_per_dag_permissions(new_dag_to_delete, per_dag_perm_user_client):
resp = per_dag_perm_user_client.post(
f"delete?dag_id={new_dag_to_delete.dag_id}&next=/home", follow_redirects=True
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import html
import json
import re
import unittest.mock
import urllib.parse
from getpass import getuser
import pytest
import time_machine
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.executors.celery_executor import CeleryExecutor
from airflow.models import DAG, DagBag, DagModel, TaskFail, TaskInstance, TaskReschedule
from airflow.models.dagcode import DagCode
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import ExternalLoggingMixin
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.www.views import TaskInstanceModelView
from tests.test_utils.api_connexion_utils import create_user, delete_roles, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs
from tests.test_utils.www import check_content_in_response, check_content_not_in_response, client_with_login
DEFAULT_DATE = timezone.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
DEFAULT_VAL = urllib.parse.quote_plus(str(DEFAULT_DATE))
DEFAULT_DAGRUN = "TEST_DAGRUN"
@pytest.fixture(scope="module", autouse=True)
def reset_dagruns():
"""Clean up stray garbage from other tests."""
clear_db_runs()
@pytest.fixture(autouse=True)
def init_dagruns(app, reset_dagruns):
with time_machine.travel(DEFAULT_DATE, tick=False):
app.dag_bag.get_dag("example_bash_operator").create_dagrun(
run_id=DEFAULT_DAGRUN,
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
data_interval=(DEFAULT_DATE, DEFAULT_DATE),
start_date=timezone.utcnow(),
state=State.RUNNING,
)
app.dag_bag.get_dag("example_subdag_operator").create_dagrun(
run_id=DEFAULT_DAGRUN,
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
data_interval=(DEFAULT_DATE, DEFAULT_DATE),
start_date=timezone.utcnow(),
state=State.RUNNING,
)
app.dag_bag.get_dag("example_xcom").create_dagrun(
run_id=DEFAULT_DAGRUN,
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
data_interval=(DEFAULT_DATE, DEFAULT_DATE),
start_date=timezone.utcnow(),
state=State.RUNNING,
)
app.dag_bag.get_dag("latest_only").create_dagrun(
run_id=DEFAULT_DAGRUN,
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
data_interval=(DEFAULT_DATE, DEFAULT_DATE),
start_date=timezone.utcnow(),
state=State.RUNNING,
)
yield
clear_db_runs()
@pytest.fixture(scope="module")
def client_ti_without_dag_edit(app):
create_user(
app,
username="all_ti_permissions_except_dag_edit",
role_name="all_ti_permissions_except_dag_edit",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
],
)
yield client_with_login(
app,
username="all_ti_permissions_except_dag_edit",
password="all_ti_permissions_except_dag_edit",
)
delete_user(app, username="all_ti_permissions_except_dag_edit") # type: ignore
delete_roles(app)
@pytest.mark.parametrize(
"url, contents",
[
pytest.param(
"/",
[
"/delete?dag_id=example_bash_operator",
"return confirmDeleteDag(this, 'example_bash_operator')",
],
id="delete-dag-button-normal",
),
pytest.param(
f"task?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}",
["Task Instance Details"],
id="task",
),
pytest.param(
f"xcom?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}",
["XCom"],
id="xcom",
),
pytest.param("xcom/list", ["List XComs"], id="xcom-list"),
pytest.param(
f"rendered-templates?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}",
["Rendered Template"],
id="rendered-templates",
),
pytest.param(
"dag_details?dag_id=example_bash_operator",
["DAG Details"],
id="dag-details-url-param",
),
pytest.param(
"dag_details?dag_id=example_subdag_operator.section-1",
["DAG Details"],
id="dag-details-subdag-url-param",
),
pytest.param(
"dags/example_subdag_operator.section-1/details",
["DAG Details"],
id="dag-details-subdag",
),
pytest.param(
"graph?dag_id=example_bash_operator",
["runme_1"],
id="graph-url-param",
),
pytest.param(
"dags/example_bash_operator/graph",
["runme_1"],
id="graph",
),
pytest.param(
"object/grid_data?dag_id=example_bash_operator",
["runme_1"],
id="grid-data",
),
pytest.param(
"object/grid_data?dag_id=example_subdag_operator.section-1",
["section-1-task-1"],
id="grid-data-subdag",
),
pytest.param(
"duration?days=30&dag_id=example_bash_operator",
["example_bash_operator"],
id="duration-url-param",
),
pytest.param(
"dags/example_bash_operator/duration?days=30",
["example_bash_operator"],
id="duration",
),
pytest.param(
"duration?days=30&dag_id=missing_dag",
["seems to be missing"],
id="duration-missing-url-param",
),
pytest.param(
"dags/missing_dag/duration?days=30",
["seems to be missing"],
id="duration-missing",
),
pytest.param(
"tries?days=30&dag_id=example_bash_operator",
["example_bash_operator"],
id="tries-url-param",
),
pytest.param(
"dags/example_bash_operator/tries?days=30",
["example_bash_operator"],
id="tries",
),
pytest.param(
"landing_times?days=30&dag_id=example_bash_operator",
["example_bash_operator"],
id="landing-times-url-param",
),
pytest.param(
"dags/example_bash_operator/landing-times?days=30",
["example_bash_operator"],
id="landing-times",
),
pytest.param(
"gantt?dag_id=example_bash_operator",
["example_bash_operator"],
id="gantt-url-param",
),
pytest.param(
"dags/example_bash_operator/gantt",
["example_bash_operator"],
id="gantt",
),
pytest.param(
"dag-dependencies",
["child_task1", "test_trigger_dagrun"],
id="dag-dependencies",
),
# Test that Graph, Tree, Calendar & Dag Details View uses the DagBag
# already created in views.py
pytest.param(
"graph?dag_id=example_bash_operator",
["example_bash_operator"],
id="existing-dagbag-graph-url-param",
),
pytest.param(
"dags/example_bash_operator/graph",
["example_bash_operator"],
id="existing-dagbag-graph",
),
pytest.param(
"tree?dag_id=example_bash_operator",
["example_bash_operator"],
id="existing-dagbag-tree-url-param",
),
pytest.param(
"dags/example_bash_operator/grid",
["example_bash_operator"],
id="existing-dagbag-grid",
),
pytest.param(
"calendar?dag_id=example_bash_operator",
["example_bash_operator"],
id="existing-dagbag-calendar-url-param",
),
pytest.param(
"dags/example_bash_operator/calendar",
["example_bash_operator"],
id="existing-dagbag-calendar",
),
pytest.param(
"dags/latest_only/calendar",
["latest_only"],
id="existing-dagbag-non-cron-schedule-calendar",
),
pytest.param(
"dag_details?dag_id=example_bash_operator",
["example_bash_operator"],
id="existing-dagbag-dag-details-url-param",
),
pytest.param(
"dags/example_bash_operator/details",
["example_bash_operator"],
id="existing-dagbag-dag-details",
),
pytest.param(
f"confirm?task_id=runme_0&dag_id=example_bash_operator&state=success"
f"&dag_run_id={DEFAULT_DAGRUN}",
["Wait a minute"],
id="confirm-success",
),
pytest.param(
f"confirm?task_id=runme_0&dag_id=example_bash_operator&state=failed&dag_run_id={DEFAULT_DAGRUN}",
["Wait a minute"],
id="confirm-failed",
),
pytest.param(
f"confirm?task_id=runme_0&dag_id=invalid_dag&state=failed&dag_run_id={DEFAULT_DAGRUN}",
["DAG invalid_dag not found"],
id="confirm-failed",
),
pytest.param(
f"confirm?task_id=invalid_task&dag_id=example_bash_operator&state=failed"
f"&dag_run_id={DEFAULT_DAGRUN}",
["Task invalid_task not found"],
id="confirm-failed",
),
pytest.param(
f"confirm?task_id=runme_0&dag_id=example_bash_operator&state=invalid"
f"&dag_run_id={DEFAULT_DAGRUN}",
["Invalid state invalid, must be either 'success' or 'failed'"],
id="confirm-invalid",
),
],
)
def test_views_get(admin_client, url, contents):
resp = admin_client.get(url, follow_redirects=True)
for content in contents:
check_content_in_response(content, resp)
def test_rendered_k8s(admin_client):
url = f"rendered-k8s?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}"
with unittest.mock.patch.object(settings, "IS_K8S_OR_K8SCELERY_EXECUTOR", True):
resp = admin_client.get(url, follow_redirects=True)
check_content_in_response("K8s Pod Spec", resp)
@conf_vars({("core", "executor"): "LocalExecutor"})
def test_rendered_k8s_without_k8s(admin_client):
url = f"rendered-k8s?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}"
resp = admin_client.get(url, follow_redirects=True)
assert 404 == resp.status_code
def test_tree_trigger_origin_tree_view(app, admin_client):
app.dag_bag.get_dag("test_tree_view").create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
data_interval=(DEFAULT_DATE, DEFAULT_DATE),
start_date=timezone.utcnow(),
state=State.RUNNING,
)
url = "tree?dag_id=test_tree_view"
resp = admin_client.get(url, follow_redirects=True)
params = {"dag_id": "test_tree_view", "origin": "/dags/test_tree_view/grid"}
href = f"/trigger?{html.escape(urllib.parse.urlencode(params))}"
check_content_in_response(href, resp)
def test_graph_trigger_origin_graph_view(app, admin_client):
app.dag_bag.get_dag("test_tree_view").create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
data_interval=(DEFAULT_DATE, DEFAULT_DATE),
start_date=timezone.utcnow(),
state=State.RUNNING,
)
url = "/dags/test_tree_view/graph"
resp = admin_client.get(url, follow_redirects=True)
params = {"dag_id": "test_tree_view", "origin": "/dags/test_tree_view/graph"}
href = f"/trigger?{html.escape(urllib.parse.urlencode(params))}"
check_content_in_response(href, resp)
def test_dag_details_trigger_origin_dag_details_view(app, admin_client):
app.dag_bag.get_dag("test_graph_view").create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
data_interval=(DEFAULT_DATE, DEFAULT_DATE),
start_date=timezone.utcnow(),
state=State.RUNNING,
)
url = "/dags/test_graph_view/details"
resp = admin_client.get(url, follow_redirects=True)
params = {"dag_id": "test_graph_view", "origin": "/dags/test_graph_view/details"}
href = f"/trigger?{html.escape(urllib.parse.urlencode(params))}"
check_content_in_response(href, resp)
def test_last_dagruns(admin_client):
resp = admin_client.post("last_dagruns", follow_redirects=True)
check_content_in_response("example_bash_operator", resp)
def test_last_dagruns_success_when_selecting_dags(admin_client):
resp = admin_client.post(
"last_dagruns", data={"dag_ids": ["example_subdag_operator"]}, follow_redirects=True
)
assert resp.status_code == 200
stats = json.loads(resp.data.decode("utf-8"))
assert "example_bash_operator" not in stats
assert "example_subdag_operator" in stats
# Multiple
resp = admin_client.post(
"last_dagruns",
data={"dag_ids": ["example_subdag_operator", "example_bash_operator"]},
follow_redirects=True,
)
stats = json.loads(resp.data.decode("utf-8"))
assert "example_bash_operator" in stats
assert "example_subdag_operator" in stats
check_content_not_in_response("example_xcom", resp)
def test_code(admin_client):
url = "code?dag_id=example_bash_operator"
resp = admin_client.get(url, follow_redirects=True)
check_content_not_in_response("Failed to load DAG file Code", resp)
check_content_in_response("example_bash_operator", resp)
def test_code_from_db(admin_client):
dag = DagBag(include_examples=True).get_dag("example_bash_operator")
DagCode(dag.fileloc, DagCode._get_code_from_file(dag.fileloc)).sync_to_db()
url = "code?dag_id=example_bash_operator"
resp = admin_client.get(url, follow_redirects=True)
check_content_not_in_response("Failed to load DAG file Code", resp)
check_content_in_response("example_bash_operator", resp)
def test_code_from_db_all_example_dags(admin_client):
dagbag = DagBag(include_examples=True)
for dag in dagbag.dags.values():
DagCode(dag.fileloc, DagCode._get_code_from_file(dag.fileloc)).sync_to_db()
url = "code?dag_id=example_bash_operator"
resp = admin_client.get(url, follow_redirects=True)
check_content_not_in_response("Failed to load DAG file Code", resp)
check_content_in_response("example_bash_operator", resp)
@pytest.mark.parametrize(
"url, data, content",
[
("paused?dag_id=example_bash_operator&is_paused=false", None, "OK"),
(
"failed",
dict(
task_id="run_this_last",
dag_id="example_bash_operator",
dag_run_id=DEFAULT_DAGRUN,
upstream="false",
downstream="false",
future="false",
past="false",
origin="/graph?dag_id=example_bash_operator",
),
"Marked failed on 1 task instances",
),
(
"success",
dict(
task_id="run_this_last",
dag_id="example_bash_operator",
dag_run_id=DEFAULT_DAGRUN,
upstream="false",
downstream="false",
future="false",
past="false",
origin="/graph?dag_id=example_bash_operator",
),
"Marked success on 1 task instances",
),
(
"clear",
dict(
task_id="runme_1",
dag_id="example_bash_operator",
execution_date=DEFAULT_DATE,
upstream="false",
downstream="false",
future="false",
past="false",
only_failed="false",
),
"example_bash_operator",
),
],
ids=[
"paused",
"failed-flash-hint",
"success-flash-hint",
"clear",
],
)
def test_views_post(admin_client, url, data, content):
resp = admin_client.post(url, data=data, follow_redirects=True)
check_content_in_response(content, resp)
@pytest.mark.parametrize("url", ["failed", "success"])
def test_dag_never_run(admin_client, url):
dag_id = "example_bash_operator"
form = dict(
task_id="run_this_last",
dag_id=dag_id,
execution_date=DEFAULT_DATE,
upstream="false",
downstream="false",
future="false",
past="false",
origin="/graph?dag_id=example_bash_operator",
)
clear_db_runs()
resp = admin_client.post(url, data=form, follow_redirects=True)
check_content_in_response(f"Cannot mark tasks as {url}, seem that DAG {dag_id} has never run", resp)
class _ForceHeartbeatCeleryExecutor(CeleryExecutor):
def heartbeat(self):
return True
@pytest.fixture()
def new_id_example_bash_operator():
dag_id = "example_bash_operator"
test_dag_id = "non_existent_dag"
with create_session() as session:
dag_query = session.query(DagModel).filter(DagModel.dag_id == dag_id)
dag_query.first().tags = []  # To avoid "FOREIGN KEY constraint" error
with create_session() as session:
dag_query.update({"dag_id": test_dag_id})
yield test_dag_id
with create_session() as session:
session.query(DagModel).filter(DagModel.dag_id == test_dag_id).update({"dag_id": dag_id})
def test_delete_dag_button_for_dag_on_scheduler_only(admin_client, new_id_example_bash_operator):
# Test for JIRA AIRFLOW-3233 (PR 4069):
# The delete-dag URL should be generated correctly for DAGs
# that exist on the scheduler (DB) but not the webserver DagBag
test_dag_id = new_id_example_bash_operator
resp = admin_client.get("/", follow_redirects=True)
check_content_in_response(f"/delete?dag_id={test_dag_id}", resp)
check_content_in_response(f"return confirmDeleteDag(this, '{test_dag_id}')", resp)
@pytest.fixture()
def new_dag_to_delete():
dag = DAG("new_dag_to_delete", is_paused_upon_creation=True)
session = settings.Session()
dag.sync_to_db(session=session)
return dag
@pytest.fixture()
def per_dag_perm_user_client(app, new_dag_to_delete):
sm = app.appbuilder.sm
perm = f"{permissions.RESOURCE_DAG_PREFIX}{new_dag_to_delete.dag_id}"
sm.create_permission(permissions.ACTION_CAN_DELETE, perm)
create_user(
app,
username="test_user_per_dag_perms",
role_name="User with some perms",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, perm),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
sm.find_user(username="test_user_per_dag_perms")
yield client_with_login(
app,
username="test_user_per_dag_perms",
password="test_user_per_dag_perms",
)
delete_user(app, username="test_user_per_dag_perms") # type: ignore
delete_roles(app)
def test_delete_just_dag_per_dag_permissions(new_dag_to_delete, per_dag_perm_user_client):
resp = per_dag_perm_user_client.post(
f"delete?dag_id={new_dag_to_delete.dag_id}&next=/home", follow_redirects=True
)
check_content_in_response(f"Deleting DAG with id {new_dag_to_delete.dag_id}.", resp)
def test_delete_just_dag_resource_permissions(new_dag_to_delete, user_client):
resp = user_client.post(f"delete?dag_id={new_dag_to_delete.dag_id}&next=/home", follow_redirects=True)
check_content_in_response(f"Deleting DAG with id {new_dag_to_delete.dag_id}.", resp)
@pytest.mark.parametrize("endpoint", ["graph", "tree"])
def test_show_external_log_redirect_link_with_local_log_handler(capture_templates, admin_client, endpoint):
"""Do not show external links if log handler is local."""
url = f"{endpoint}?dag_id=example_bash_operator"
with capture_templates() as templates:
admin_client.get(url, follow_redirects=True)
ctx = templates[0].local_context
assert not ctx["show_external_log_redirect"]
assert ctx["external_log_name"] is None
class _ExternalHandler(ExternalLoggingMixin):
_supports_external_link = True
LOG_NAME = "ExternalLog"
@property
def log_name(self) -> str:
return self.LOG_NAME
def get_external_log_url(self, *args, **kwargs) -> str:
return "http://external-service.com"
@property
def supports_external_link(self) -> bool:
return self._supports_external_link
@pytest.mark.parametrize("endpoint", ["graph", "tree"])
@unittest.mock.patch(
"airflow.utils.log.log_reader.TaskLogReader.log_handler",
new_callable=unittest.mock.PropertyMock,
return_value=_ExternalHandler(),
)
def test_show_external_log_redirect_link_with_external_log_handler(
_, capture_templates, admin_client, endpoint
):
"""Show external links if log handler is external."""
url = f"{endpoint}?dag_id=example_bash_operator"
with capture_templates() as templates:
admin_client.get(url, follow_redirects=True)
ctx = templates[0].local_context
assert ctx["show_external_log_redirect"]
assert ctx["external_log_name"] == _ExternalHandler.LOG_NAME
@pytest.mark.parametrize("endpoint", ["graph", "tree"])
@unittest.mock.patch(
"airflow.utils.log.log_reader.TaskLogReader.log_handler",
new_callable=unittest.mock.PropertyMock,
return_value=_ExternalHandler(),
)
def test_external_log_redirect_link_with_external_log_handler_not_shown(
_external_handler, capture_templates, admin_client, endpoint
):
"""Show external links if log handler is external."""
_external_handler.return_value._supports_external_link = False
url = f"{endpoint}?dag_id=example_bash_operator"
with capture_templates() as templates:
admin_client.get(url, follow_redirects=True)
ctx = templates[0].local_context
assert not ctx["show_external_log_redirect"]
assert ctx["external_log_name"] is None
def _get_appbuilder_pk_string(model_view_cls, instance) -> str:
"""Utility to get Flask-Appbuilder's string format "pk" for an object.
Used to generate requests to FAB action views without *too* much difficulty.
The implementation relies on FAB internals, but unfortunately I don't see
a better way around it.
Example usage::
from airflow.www.views import TaskInstanceModelView
ti = session.query(TaskInstance).filter(...).one()
pk = _get_appbuilder_pk_string(TaskInstanceModelView, ti)
client.post("...", data={"action": "...", "rowid": pk})
"""
pk_value = model_view_cls.datamodel.get_pk_value(instance)
return model_view_cls._serialize_pk_if_composite(model_view_cls, pk_value)
def test_task_instance_delete(session, admin_client, create_task_instance):
task_instance_to_delete = create_task_instance(
task_id="test_task_instance_delete",
execution_date=timezone.utcnow(),
state=State.DEFERRED,
)
composite_key = _get_appbuilder_pk_string(TaskInstanceModelView, task_instance_to_delete)
task_id = task_instance_to_delete.task_id
assert session.query(TaskInstance).filter(TaskInstance.task_id == task_id).count() == 1
admin_client.post(f"/taskinstance/delete/{composite_key}", follow_redirects=True)
assert session.query(TaskInstance).filter(TaskInstance.task_id == task_id).count() == 0
def test_task_instance_delete_permission_denied(session, client_ti_without_dag_edit, create_task_instance):
task_instance_to_delete = create_task_instance(
task_id="test_task_instance_delete_permission_denied",
execution_date=timezone.utcnow(),
state=State.DEFERRED,
session=session,
)
session.commit()
composite_key = _get_appbuilder_pk_string(TaskInstanceModelView, task_instance_to_delete)
task_id = task_instance_to_delete.task_id
assert session.query(TaskInstance).filter(TaskInstance.task_id == task_id).count() == 1
resp = client_ti_without_dag_edit.post(f"/taskinstance/delete/{composite_key}", follow_redirects=True)
check_content_in_response(f"Access denied for dag_id {task_instance_to_delete.dag_id}", resp)
assert session.query(TaskInstance).filter(TaskInstance.task_id == task_id).count() == 1
@pytest.mark.parametrize(
"client_fixture, should_succeed",
[
("admin_client", True),
("user_client", True),
("viewer_client", False),
("anonymous_client", False),
],
)
def test_task_instance_clear(session, request, client_fixture, should_succeed):
client = request.getfixturevalue(client_fixture)
task_id = "runme_0"
initial_state = State.SUCCESS
# Set the state to success for clearing.
ti_q = session.query(TaskInstance).filter(TaskInstance.task_id == task_id)
ti_q.update({"state": initial_state})
session.commit()
# Send a request to clear.
rowid = _get_appbuilder_pk_string(TaskInstanceModelView, ti_q.one())
resp = client.post(
"/taskinstance/action_post",
data={"action": "clear", "rowid": rowid},
follow_redirects=True,
)
assert resp.status_code == (200 if should_succeed else 404)
# Now the state should be None.
state = session.query(TaskInstance.state).filter(TaskInstance.task_id == task_id).scalar()
assert state == (State.NONE if should_succeed else initial_state)
def test_task_instance_clear_failure(admin_client):
rowid = '["12345"]' # F.A.B. crashes if the rowid is *too* invalid.
resp = admin_client.post(
"/taskinstance/action_post",
data={"action": "clear", "rowid": rowid},
follow_redirects=True,
)
assert resp.status_code == 200
check_content_in_response("Failed to clear task instances:", resp)
@pytest.mark.parametrize(
"action, expected_state",
[
("set_running", State.RUNNING),
("set_failed", State.FAILED),
("set_success", State.SUCCESS),
("set_retry", State.UP_FOR_RETRY),
("set_skipped", State.SKIPPED),
],
ids=["running", "failed", "success", "retry", "skipped"],
)
def test_task_instance_set_state(session, admin_client, action, expected_state):
task_id = "runme_0"
# Send a request to clear.
ti_q = session.query(TaskInstance).filter(TaskInstance.task_id == task_id)
rowid = _get_appbuilder_pk_string(TaskInstanceModelView, ti_q.one())
resp = admin_client.post(
"/taskinstance/action_post",
data={"action": action, "rowid": rowid},
follow_redirects=True,
)
assert resp.status_code == 200
# Now the state should be modified.
state = session.query(TaskInstance.state).filter(TaskInstance.task_id == task_id).scalar()
assert state == expected_state
@pytest.mark.parametrize(
"action",
[
"set_running",
"set_failed",
"set_success",
"set_retry",
"set_skipped",
],
)
def test_task_instance_set_state_failure(admin_client, action):
rowid = '["12345"]' # F.A.B. crashes if the rowid is *too* invalid.
resp = admin_client.post(
"/taskinstance/action_post",
data={"action": action, "rowid": rowid},
follow_redirects=True,
)
assert resp.status_code == 200
check_content_in_response("Failed to set state", resp)
@pytest.mark.parametrize(
"task_search_tuples",
[
[("example_xcom", "bash_push"), ("example_bash_operator", "run_this_last")],
[("example_subdag_operator", "some-other-task")],
],
ids=["multiple_tasks", "one_task"],
)
def test_action_muldelete_task_instance(session, admin_client, task_search_tuples):
# get task instances to delete
tasks_to_delete = []
for task_search_tuple in task_search_tuples:
dag_id, task_id = task_search_tuple
tasks_to_delete.append(
session.query(TaskInstance)
.filter(TaskInstance.task_id == task_id, TaskInstance.dag_id == dag_id)
.one()
)
# add task reschedules for those tasks to make sure that the delete cascades to the required tables
trs = [
TaskReschedule(
task=task,
run_id=task.run_id,
try_number=1,
start_date=timezone.datetime(2021, 1, 1),
end_date=timezone.datetime(2021, 1, 2),
reschedule_date=timezone.datetime(2021, 1, 3),
)
for task in tasks_to_delete
]
session.bulk_save_objects(trs)
session.flush()
# run the function to test
resp = admin_client.post(
"/taskinstance/action_post",
data={
"action": "muldelete",
"rowid": [_get_appbuilder_pk_string(TaskInstanceModelView, task) for task in tasks_to_delete],
},
follow_redirects=True,
)
# assert expected behavior for that function and its response
assert resp.status_code == 200
for task_search_tuple in task_search_tuples:
dag_id, task_id = task_search_tuple
assert (
session.query(TaskInstance)
.filter(TaskInstance.task_id == task_id, TaskInstance.dag_id == dag_id)
.count()
== 0
)
assert session.query(TaskReschedule).count() == 0
def test_task_fail_duration(app, admin_client, dag_maker, session):
"""Task duration page with a TaskFail entry should render without error."""
with dag_maker() as dag:
op1 = BashOperator(task_id="fail", bash_command="exit 1")
op2 = BashOperator(task_id="success", bash_command="exit 0")
with pytest.raises(AirflowException):
op1.run()
op2.run()
op1_fails = (
session.query(TaskFail)
.filter(
TaskFail.task_id == "fail",
TaskFail.dag_id == dag.dag_id,
)
.all()
)
op2_fails = (
session.query(TaskFail)
.filter(
TaskFail.task_id == "success",
TaskFail.dag_id == dag.dag_id,
)
.all()
)
assert len(op1_fails) == 1
assert len(op2_fails) == 0
with unittest.mock.patch.object(app, "dag_bag") as mocked_dag_bag:
mocked_dag_bag.get_dag.return_value = dag
resp = admin_client.get(f"dags/{dag.dag_id}/duration", follow_redirects=True)
html = resp.get_data().decode()
cumulative_chart = json.loads(re.search("data_cumlinechart=(.*);", html).group(1))
line_chart = json.loads(re.search("data_linechart=(.*);", html).group(1))
assert resp.status_code == 200
assert sorted(item["key"] for item in cumulative_chart) == ["fail", "success"]
assert sorted(item["key"] for item in line_chart) == ["fail", "success"]
def test_graph_view_doesnt_fail_on_recursion_error(app, dag_maker, admin_client):
"""Test that the graph view doesn't fail on a recursion error."""
from airflow.models.baseoperator import chain
with dag_maker("test_fails_with_recursion") as dag:
tasks = [
BashOperator(
task_id=f"task_{i}",
bash_command="echo test",
)
for i in range(1, 1000 + 1)
]
chain(*tasks)
with unittest.mock.patch.object(app, "dag_bag") as mocked_dag_bag:
mocked_dag_bag.get_dag.return_value = dag
url = f"/dags/{dag.dag_id}/graph"
resp = admin_client.get(url, follow_redirects=True)
assert resp.status_code == 200
def test_task_instances(admin_client):
"""Test task_instances view."""
resp = admin_client.get(
f"/object/task_instances?dag_id=example_bash_operator&execution_date={DEFAULT_DATE}",
follow_redirects=True,
)
assert resp.status_code == 200
assert resp.json == {
"also_run_this": {
"dag_id": "example_bash_operator",
"duration": None,
"end_date": None,
"executor_config": {},
"external_executor_id": None,
"hostname": "",
"job_id": None,
"map_index": -1,
"max_tries": 0,
"next_kwargs": None,
"next_method": None,
"operator": "BashOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 2,
"queue": "default",
"queued_by_job_id": None,
"queued_dttm": None,
"run_id": "TEST_DAGRUN",
"start_date": None,
"state": None,
"task_id": "also_run_this",
"trigger_id": None,
"trigger_timeout": None,
"try_number": 1,
"unixname": getuser(),
"updated_at": DEFAULT_DATE.isoformat(),
},
"run_after_loop": {
"dag_id": "example_bash_operator",
"duration": None,
"end_date": None,
"executor_config": {},
"external_executor_id": None,
"hostname": "",
"job_id": None,
"map_index": -1,
"max_tries": 0,
"next_kwargs": None,
"next_method": None,
"operator": "BashOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 2,
"queue": "default",
"queued_by_job_id": None,
"queued_dttm": None,
"run_id": "TEST_DAGRUN",
"start_date": None,
"state": None,
"task_id": "run_after_loop",
"trigger_id": None,
"trigger_timeout": None,
"try_number": 1,
"unixname": getuser(),
"updated_at": DEFAULT_DATE.isoformat(),
},
"run_this_last": {
"dag_id": "example_bash_operator",
"duration": None,
"end_date": None,
"executor_config": {},
"external_executor_id": None,
"hostname": "",
"job_id": None,
"map_index": -1,
"max_tries": 0,
"next_kwargs": None,
"next_method": None,
"operator": "EmptyOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 1,
"queue": "default",
"queued_by_job_id": None,
"queued_dttm": None,
"run_id": "TEST_DAGRUN",
"start_date": None,
"state": None,
"task_id": "run_this_last",
"trigger_id": None,
"trigger_timeout": None,
"try_number": 1,
"unixname": getuser(),
"updated_at": DEFAULT_DATE.isoformat(),
},
"runme_0": {
"dag_id": "example_bash_operator",
"duration": None,
"end_date": None,
"executor_config": {},
"external_executor_id": None,
"hostname": "",
"job_id": None,
"map_index": -1,
"max_tries": 0,
"next_kwargs": None,
"next_method": None,
"operator": "BashOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 3,
"queue": "default",
"queued_by_job_id": None,
"queued_dttm": None,
"run_id": "TEST_DAGRUN",
"start_date": None,
"state": None,
"task_id": "runme_0",
"trigger_id": None,
"trigger_timeout": None,
"try_number": 1,
"unixname": getuser(),
"updated_at": DEFAULT_DATE.isoformat(),
},
"runme_1": {
"dag_id": "example_bash_operator",
"duration": None,
"end_date": None,
"executor_config": {},
"external_executor_id": None,
"hostname": "",
"job_id": None,
"map_index": -1,
"max_tries": 0,
"next_kwargs": None,
"next_method": None,
"operator": "BashOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 3,
"queue": "default",
"queued_by_job_id": None,
"queued_dttm": None,
"run_id": "TEST_DAGRUN",
"start_date": None,
"state": None,
"task_id": "runme_1",
"trigger_id": None,
"trigger_timeout": None,
"try_number": 1,
"unixname": getuser(),
"updated_at": DEFAULT_DATE.isoformat(),
},
"runme_2": {
"dag_id": "example_bash_operator",
"duration": None,
"end_date": None,
"executor_config": {},
"external_executor_id": None,
"hostname": "",
"job_id": None,
"map_index": -1,
"max_tries": 0,
"next_kwargs": None,
"next_method": None,
"operator": "BashOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 3,
"queue": "default",
"queued_by_job_id": None,
"queued_dttm": None,
"run_id": "TEST_DAGRUN",
"start_date": None,
"state": None,
"task_id": "runme_2",
"trigger_id": None,
"trigger_timeout": None,
"try_number": 1,
"unixname": getuser(),
"updated_at": DEFAULT_DATE.isoformat(),
},
"this_will_skip": {
"dag_id": "example_bash_operator",
"duration": None,
"end_date": None,
"executor_config": {},
"external_executor_id": None,
"hostname": "",
"job_id": None,
"map_index": -1,
"max_tries": 0,
"next_kwargs": None,
"next_method": None,
"operator": "BashOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 2,
"queue": "default",
"queued_by_job_id": None,
"queued_dttm": None,
"run_id": "TEST_DAGRUN",
"start_date": None,
"state": None,
"task_id": "this_will_skip",
"trigger_id": None,
"trigger_timeout": None,
"try_number": 1,
"unixname": getuser(),
"updated_at": DEFAULT_DATE.isoformat(),
},
}
|
GHSA-2h84-3crq-vgfj
|
src/picklescan/scanner.py
|
@@ -11,6 +11,7 @@
from typing import IO, List, Optional, Set, Tuple
import urllib.parse
import zipfile
+from .relaxed_zipfile import RelaxedZipFile
from .torch import (
get_magic_number,
@@ -375,7 +376,7 @@ def get_magic_bytes_from_zipfile(zip: zipfile.ZipFile, num_bytes=8):
def scan_zip_bytes(data: IO[bytes], file_id) -> ScanResult:
result = ScanResult([])
- with zipfile.ZipFile(data, "r") as zip:
+ with RelaxedZipFile(data, "r") as zip:
magic_bytes = get_magic_bytes_from_zipfile(zip)
file_names = zip.namelist()
_log.debug("Files in zip archive %s: %s", file_id, file_names)
|
from dataclasses import dataclass
from enum import Enum
import http.client
import io
import json
import logging
import os
import pickletools
from tarfile import TarError
from tempfile import TemporaryDirectory
from typing import IO, List, Optional, Set, Tuple
import urllib.parse
import zipfile
from .torch import (
get_magic_number,
InvalidMagicError,
_is_zipfile,
MAGIC_NUMBER,
_should_read_directly,
)
class SafetyLevel(Enum):
Innocuous = "innocuous"
Suspicious = "suspicious"
Dangerous = "dangerous"
@dataclass
class Global:
module: str
name: str
safety: SafetyLevel
@dataclass
class ScanResult:
globals: List[Global]
scanned_files: int = 0
issues_count: int = 0
infected_files: int = 0
scan_err: bool = False
def merge(self, sr: "ScanResult"):
self.globals.extend(sr.globals)
self.scanned_files += sr.scanned_files
self.issues_count += sr.issues_count
self.infected_files += sr.infected_files
self.scan_err = self.scan_err or sr.scan_err
class GenOpsError(Exception):
def __init__(self, msg: str, globals: Optional[Set[Tuple[str, str]]]):
self.msg = msg
self.globals = globals
super().__init__()
def __str__(self) -> str:
return self.msg
_log = logging.getLogger("picklescan")
_safe_globals = {
"collections": {"OrderedDict"},
"torch": {
"LongStorage",
"FloatStorage",
"HalfStorage",
"QUInt2x4Storage",
"QUInt4x2Storage",
"QInt32Storage",
"QInt8Storage",
"QUInt8Storage",
"ComplexFloatStorage",
"ComplexDoubleStorage",
"DoubleStorage",
"BFloat16Storage",
"BoolStorage",
"CharStorage",
"ShortStorage",
"IntStorage",
"ByteStorage",
},
"numpy": {
"dtype",
"ndarray",
},
"numpy.core.multiarray": {
"_reconstruct",
},
"torch._utils": {"_rebuild_tensor_v2"},
}
_unsafe_globals = {
"__builtin__": {
"eval",
"compile",
"getattr",
"apply",
"exec",
"open",
"breakpoint",
}, # Pickle versions 0, 1, 2 have those functions under '__builtin__'
"builtins": {
"eval",
"compile",
"getattr",
"apply",
"exec",
"open",
"breakpoint",
}, # Pickle versions 3, 4 have those functions under 'builtins'
"webbrowser": "*", # Includes webbrowser.open()
"httplib": "*", # Includes http.client.HTTPSConnection()
"requests.api": "*",
"aiohttp.client": "*",
"os": "*",
"nt": "*", # Alias for 'os' on Windows. Includes os.system()
"posix": "*", # Alias for 'os' on Linux. Includes os.system()
"socket": "*",
"subprocess": "*",
"sys": "*",
"shutil": "*",
"runpy": "*", # Includes runpy._run_code
"operator": "attrgetter", # Ex of code execution: operator.attrgetter("system")(__import__("os"))("echo pwned")
"pickle": "*",
"_pickle": "*",
"bdb": "*",
"pdb": "*",
"asyncio": "*",
"pydoc": "pipepager", # pydoc.pipepager('help','echo pwned')
"venv": "*",
"torch.serialization": "load", # pickle could be used to load a different file
"functools": "partial", # functools.partial(os.system, "echo pwned")
"torch._inductor.codecache": "compile_file", # compile_file('', '', ['sh', '-c','$(echo pwned)'])
"pip": "*",
}
#
# TODO: handle methods loading other Pickle files (either mark as suspicious, or follow calls to scan other files [preventing infinite loops])
#
# numpy.load()
# https://numpy.org/doc/stable/reference/generated/numpy.load.html#numpy.load
# numpy.ctypeslib.load_library()
# https://numpy.org/doc/stable/reference/routines.ctypeslib.html#numpy.ctypeslib.load_library
# pandas.read_pickle()
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_pickle.html
# joblib.load()
# https://joblib.readthedocs.io/en/latest/generated/joblib.load.html
# torch.load()
# https://pytorch.org/docs/stable/generated/torch.load.html
# tf.keras.models.load_model()
# https://www.tensorflow.org/api_docs/python/tf/keras/models/load_model
#
_numpy_file_extensions = {".npy"} # Note: .npz is handled as zip files
_pytorch_file_extensions = {".bin", ".pt", ".pth", ".ckpt"}
_pickle_file_extensions = {".pkl", ".pickle", ".joblib", ".dat", ".data"}
_zip_file_extensions = {".zip", ".npz", ".7z"}
_pickle_magic_bytes = {
b"\x80\x00",
b"\x80\x01",
b"\x80\x02",
b"\x80\x03",
b"\x80\x04",
b"\x80\x05",
}
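# Editor's note: descriptive comment added for clarity, not in the original.
# The prefixes above match the PROTO opcode (0x80) followed by a protocol
# byte; pickles written with protocol >= 2 begin this way, e.g.:
#
#   import pickle
#   assert pickle.dumps({"a": 1}, protocol=4)[:2] == b"\x80\x04"
#
# Protocol 0 and 1 streams do not emit PROTO, so inside archives they are
# caught by the extension checks rather than by these magic bytes.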
def _is_7z_file(f: IO[bytes]) -> bool:
read_bytes = []
start = f.tell()
byte = f.read(1)
while byte != b"":
read_bytes.append(byte)
if len(read_bytes) == 6:
break
byte = f.read(1)
f.seek(start)
local_header_magic_number = [b"7", b"z", b"\xbc", b"\xaf", b"\x27", b"\x1c"]
return read_bytes == local_header_magic_number
def _http_get(url) -> bytes:
_log.debug(f"Request: GET {url}")
parsed_url = urllib.parse.urlparse(url)
path_and_query = parsed_url.path + (
"?" + parsed_url.query if len(parsed_url.query) > 0 else ""
)
conn = http.client.HTTPSConnection(parsed_url.netloc)
try:
conn.request("GET", path_and_query)
response = conn.getresponse()
_log.debug(f"Response: status code {response.status} reason {response.reason}")
if response.status == 302: # Follow redirections
return _http_get(response.headers["Location"])
elif response.status >= 400:
raise RuntimeError(
f"HTTP {response.status} ({response.reason}) calling GET {parsed_url.scheme}://{parsed_url.netloc}{path_and_query}"
)
return response.read()
finally:
conn.close()
def _list_globals(data: IO[bytes], multiple_pickles=True) -> Set[Tuple[str, str]]:
globals = set()
memo = {}
# Scan the data for pickle buffers, stopping when parsing fails or stops making progress
last_byte = b"dummy"
parsing_pkl_error = None
while last_byte != b"":
# List opcodes
ops = []
try:
for op in pickletools.genops(data):
ops.append(op)
except Exception as e:
parsing_pkl_error = str(e)
last_byte = data.read(1)
data.seek(-1, 1)
# Extract global imports
for n in range(len(ops)):
op = ops[n]
op_name = op[0].name
op_value = op[1]
if op_name == "MEMOIZE" and n > 0:
memo[len(memo)] = ops[n - 1][1]
elif op_name in ["PUT", "BINPUT", "LONG_BINPUT"] and n > 0:
memo[op_value] = ops[n - 1][1]
elif op_name in ("GLOBAL", "INST"):
globals.add(tuple(op_value.split(" ", 1)))
elif op_name == "STACK_GLOBAL":
values = []
for offset in range(1, n):
if ops[n - offset][0].name in [
"MEMOIZE",
"PUT",
"BINPUT",
"LONG_BINPUT",
]:
continue
if ops[n - offset][0].name in ["GET", "BINGET", "LONG_BINGET"]:
values.append(memo[int(ops[n - offset][1])])
elif ops[n - offset][0].name not in [
"SHORT_BINUNICODE",
"UNICODE",
"BINUNICODE",
"BINUNICODE8",
]:
_log.debug(
"Presence of non-string opcode, categorizing as an unknown dangerous import"
)
values.append("unknown")
else:
values.append(ops[n - offset][1])
if len(values) == 2:
break
if len(values) != 2:
raise ValueError(
f"Found {len(values)} values for STACK_GLOBAL at position {n} instead of 2."
)
globals.add((values[1], values[0]))
if not multiple_pickles:
break
if parsing_pkl_error is not None:
# XXX: given we can have multiple pickles in a file, we may have already successfully extracted globals from a valid pickle.
# Thus return the already found globals in the error & let the caller decide what to do.
# Additionally, we return the error at the end of the loop to scan imports in partially broken files,
# which can still unpickle and be dangerous even when the stream is not fully valid pickle.
globals_opt = globals if len(globals) > 0 else None
raise GenOpsError(parsing_pkl_error, globals_opt)
return globals
def _build_scan_result_from_raw_globals(
raw_globals: Set[Tuple[str, str]],
file_id,
scan_err=False,
) -> ScanResult:
globals = []
issues_count = 0
for rg in raw_globals:
g = Global(rg[0], rg[1], SafetyLevel.Dangerous)
safe_filter = _safe_globals.get(g.module)
unsafe_filter = _unsafe_globals.get(g.module)
if "unknown" in g.module or "unknown" in g.name:
g.safety = SafetyLevel.Dangerous
_log.warning(
"%s: %s import '%s %s' FOUND", file_id, g.safety.value, g.module, g.name
)
issues_count += 1
elif unsafe_filter is not None and (
unsafe_filter == "*" or g.name in unsafe_filter
):
g.safety = SafetyLevel.Dangerous
_log.warning(
"%s: %s import '%s %s' FOUND", file_id, g.safety.value, g.module, g.name
)
issues_count += 1
elif safe_filter is not None and (safe_filter == "*" or g.name in safe_filter):
g.safety = SafetyLevel.Innocuous
else:
g.safety = SafetyLevel.Suspicious
globals.append(g)
return ScanResult(globals, 1, issues_count, 1 if issues_count > 0 else 0, scan_err)
def scan_pickle_bytes(data: IO[bytes], file_id, multiple_pickles=True) -> ScanResult:
"""Disassemble a Pickle stream and report issues"""
try:
raw_globals = _list_globals(data, multiple_pickles)
except GenOpsError as e:
_log.error(f"ERROR: parsing pickle in {file_id}: {e}")
if e.globals is not None:
return _build_scan_result_from_raw_globals(
e.globals, file_id, scan_err=True
)
else:
return ScanResult([], scan_err=True)
_log.debug("Global imports in %s: %s", file_id, raw_globals)
return _build_scan_result_from_raw_globals(raw_globals, file_id)
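# Editor's note: illustrative usage sketch, not part of the original module.
# scan_pickle_bytes() takes any binary stream plus a label used for logging:
#
#   import io, pickle
#   benign = scan_pickle_bytes(io.BytesIO(pickle.dumps({"a": 1})), "benign.pkl")
#   # benign.globals == [] and benign.issues_count == 0
#
# A stream whose pickled object resolves a global such as builtins.eval or
# os.system (see the test payloads further below) is reported with
# SafetyLevel.Dangerous and counted in issues_count / infected_files.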
# XXX: it appears there is no way to get the byte stream for a given file within the 7z archive, which forces us to unzip to disk before scanning
def scan_7z_bytes(data: IO[bytes], file_id) -> ScanResult:
try:
import py7zr
except ImportError:
raise Exception(
"py7zr is required to scan 7z archives, install picklescan using: 'pip install picklescan[7z]'"
)
result = ScanResult([])
with py7zr.SevenZipFile(data, mode="r") as archive:
file_names = archive.getnames()
targets = [f for f in file_names if f.endswith(tuple(_pickle_file_extensions))]
_log.debug("Files in 7z archive %s: %s", file_id, targets)
with TemporaryDirectory() as tmpdir:
archive.extract(path=tmpdir, targets=targets)
for file_name in targets:
file_path = os.path.join(tmpdir, file_name)
_log.debug("Scanning file %s in 7z archive %s", file_name, file_id)
if os.path.isfile(file_path):
result.merge(scan_file_path(file_path))
return result
def get_magic_bytes_from_zipfile(zip: zipfile.ZipFile, num_bytes=8):
magic_bytes = {}
for file_info in zip.infolist():
with zip.open(file_info.filename) as f:
magic_bytes[file_info.filename] = f.read(num_bytes)
return magic_bytes
def scan_zip_bytes(data: IO[bytes], file_id) -> ScanResult:
result = ScanResult([])
with zipfile.ZipFile(data, "r") as zip:
magic_bytes = get_magic_bytes_from_zipfile(zip)
file_names = zip.namelist()
_log.debug("Files in zip archive %s: %s", file_id, file_names)
for file_name in file_names:
magic_number = magic_bytes.get(file_name, b"")
file_ext = os.path.splitext(file_name)[1]
if file_ext in _pickle_file_extensions or any(
magic_number.startswith(mn) for mn in _pickle_magic_bytes
):
_log.debug("Scanning file %s in zip archive %s", file_name, file_id)
with zip.open(file_name, "r") as file:
result.merge(scan_pickle_bytes(file, f"{file_id}:{file_name}"))
elif file_ext in _numpy_file_extensions or magic_number.startswith(
b"\x93NUMPY"
):
_log.debug("Scanning file %s in zip archive %s", file_name, file_id)
with zip.open(file_name, "r") as file:
result.merge(scan_numpy(file, f"{file_id}:{file_name}"))
return result
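# Editor's note: descriptive comment added for clarity, not in the original.
# scan_zip_bytes() walks every archive member and scans it when either its
# extension or its leading magic bytes identify it as a pickle or .npy file.
# The patch above swaps zipfile.ZipFile for RelaxedZipFile (not shown here)
# so that archives whose central-directory names disagree with the local file
# headers (the corrupt-central-directory case added in the tests below) are
# still opened and scanned rather than failing outright.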
def scan_numpy(data: IO[bytes], file_id) -> ScanResult:
# Delay import to avoid dependency on NumPy
import numpy as np
# Code to distinguish between NumPy binary files and pickles.
_ZIP_PREFIX = b"PK\x03\x04"
_ZIP_SUFFIX = b"PK\x05\x06" # empty zip files start with this
N = len(np.lib.format.MAGIC_PREFIX)
magic = data.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
data.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
# .npz file
raise ValueError(f".npz file not handled as zip file: {file_id}")
elif magic == np.lib.format.MAGIC_PREFIX:
# .npy file
version = np.lib.format.read_magic(data)
np.lib.format._check_version(version)
_, _, dtype = np.lib.format._read_array_header(data, version)
if dtype.hasobject:
return scan_pickle_bytes(data, file_id)
else:
return ScanResult([], 1)
else:
return scan_pickle_bytes(data, file_id)
def scan_pytorch(data: IO[bytes], file_id) -> ScanResult:
# new pytorch format
if _is_zipfile(data):
return scan_zip_bytes(data, file_id)
elif _is_7z_file(data):
return scan_7z_bytes(data, file_id)
# old pytorch format
else:
scan_result = ScanResult([])
should_read_directly = _should_read_directly(data)
if should_read_directly and data.tell() == 0:
# try loading from tar
try:
# TODO: implement loading from tar
raise TarError()
except TarError:
# file does not contain a tar
data.seek(0)
magic = get_magic_number(data)
if magic != MAGIC_NUMBER:
raise InvalidMagicError(magic, MAGIC_NUMBER, file_id)
for _ in range(5):
scan_result.merge(scan_pickle_bytes(data, file_id, multiple_pickles=False))
scan_result.scanned_files = 1
return scan_result
def scan_bytes(data: IO[bytes], file_id, file_ext: Optional[str] = None) -> ScanResult:
if file_ext is not None and file_ext in _pytorch_file_extensions:
try:
return scan_pytorch(data, file_id)
except InvalidMagicError as e:
_log.error(f"ERROR: Invalid magic number for file {e}")
return ScanResult([], scan_err=True)
elif file_ext is not None and file_ext in _numpy_file_extensions:
return scan_numpy(data, file_id)
else:
is_zip = zipfile.is_zipfile(data)
data.seek(0)
if is_zip:
return scan_zip_bytes(data, file_id)
elif _is_7z_file(data):
return scan_7z_bytes(data, file_id)
else:
return scan_pickle_bytes(data, file_id)
def scan_huggingface_model(repo_id):
# List model files
model = json.loads(
_http_get(f"https://huggingface.co/api/models/{repo_id}").decode("utf-8")
)
file_names = [
file_name
for file_name in (sibling.get("rfilename") for sibling in model["siblings"])
if file_name is not None
]
# Scan model files
scan_result = ScanResult([])
for file_name in file_names:
file_ext = os.path.splitext(file_name)[1]
if (
file_ext not in _zip_file_extensions
and file_ext not in _pickle_file_extensions
and file_ext not in _pytorch_file_extensions
):
continue
_log.debug("Scanning file %s in model %s", file_name, repo_id)
url = f"https://huggingface.co/{repo_id}/resolve/main/{file_name}"
data = io.BytesIO(_http_get(url))
scan_result.merge(scan_bytes(data, url, file_ext))
return scan_result
def scan_directory_path(path) -> ScanResult:
scan_result = ScanResult([])
for base_path, _, file_names in os.walk(path):
for file_name in file_names:
file_ext = os.path.splitext(file_name)[1]
if (
file_ext not in _zip_file_extensions
and file_ext not in _pickle_file_extensions
and file_ext not in _pytorch_file_extensions
):
continue
file_path = os.path.join(base_path, file_name)
_log.debug("Scanning file %s", file_path)
with open(file_path, "rb") as file:
scan_result.merge(scan_bytes(file, file_path, file_ext))
return scan_result
def scan_file_path(path) -> ScanResult:
file_ext = os.path.splitext(path)[1]
with open(path, "rb") as file:
return scan_bytes(file, path, file_ext)
def scan_url(url) -> ScanResult:
return scan_bytes(io.BytesIO(_http_get(url)), url)
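# Editor's note: illustrative usage sketch, not part of the original module.
# The entry points defined above dispatch on origin and file extension
# (the URL and repo name below are hypothetical):
#
#   result = scan_file_path("model.pkl")        # single file
#   result = scan_directory_path("./models")    # recursive walk
#   result = scan_url("https://example.com/model.bin")
#   result = scan_huggingface_model("user/repo")
#   print(result.scanned_files, result.issues_count, result.infected_files)
#
# Each returns a ScanResult whose `globals` list records every import found,
# tagged Innocuous / Suspicious / Dangerous.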
|
GHSA-2fh4-gpch-vqv4
|
tests/test_scanner.py
|
@@ -249,6 +249,22 @@ def initialize_zip_file(path, file_name, data):
zip.writestr(file_name, data)
+def initialize_corrupt_zip_file_central_directory(path, file_name, data):
+ if not os.path.exists(path):
+ with zipfile.ZipFile(path, "w") as zip:
+ zip.writestr(file_name, data)
+
+ with open(path, "rb") as f:
+ data = f.read()
+
+ # Replace only the first occurrence of "data.pkl" with "datap.kl"
+ modified_data = data.replace(b"data.pkl", b"datap.kl", 1)
+
+ # Write back the modified content
+ with open(path, "wb") as f:
+ f.write(modified_data)
+
+
def initialize_numpy_files():
import numpy as np
@@ -490,6 +506,12 @@ def initialize_pickle_files():
pickle.dumps(Malicious1(), protocol=4),
)
+ initialize_corrupt_zip_file_central_directory(
+ f"{_root_path}/data/malicious1_central_directory.zip",
+ "data.pkl",
+ pickle.dumps(Malicious1(), protocol=4),
+ )
+
initialize_zip_file(
f"{_root_path}/data/malicious1_wrong_ext.zip",
"data.txt", # Pickle file with a non-standard extension
@@ -646,7 +668,22 @@ def test_scan_file_path():
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious1.zip"), malicious1
)
- compare_scan_results(scan_file_path(f"{_root_path}/data/malicious1.7z"), malicious1)
+ compare_scan_results(
+ scan_file_path(f"{_root_path}/data/malicious1_central_directory.zip"),
+ malicious1,
+ )
+ compare_scan_results(
+ scan_file_path(f"{_root_path}/data/malicious1_0x1.zip"), malicious1
+ )
+ compare_scan_results(
+ scan_file_path(f"{_root_path}/data/malicious1_0x20.zip"), malicious1
+ )
+ compare_scan_results(
+ scan_file_path(f"{_root_path}/data/malicious1_0x40.zip"), malicious1
+ )
+ compare_scan_results(
+ scan_file_path(f"{_root_path}/data/malicious1.7z"), malicious1
+ )
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious1_wrong_ext.zip"), malicious1
)
@@ -835,6 +872,9 @@ def test_scan_directory_path():
Global("functools", "partial", SafetyLevel.Dangerous),
Global("pip", "main", SafetyLevel.Dangerous),
Global("builtins", "eval", SafetyLevel.Dangerous),
+ Global("builtins", "eval", SafetyLevel.Dangerous),
+ Global("builtins", "eval", SafetyLevel.Dangerous),
+ Global("builtins", "eval", SafetyLevel.Dangerous),
],
scanned_files=38,
issues_count=39,
|
import aiohttp
import bdb
import http.client
import importlib
import io
import os
import pathlib
import pickle
import pip
import py7zr
import pydoc
import pytest
import requests
import runpy
import socket
import subprocess
import sys
import venv
import zipfile
from functools import partial
from unittest import TestCase
from picklescan.cli import main
from picklescan.scanner import (
Global,
SafetyLevel,
ScanResult,
_http_get,
_list_globals,
scan_pickle_bytes,
scan_zip_bytes,
scan_directory_path,
scan_file_path,
scan_url,
scan_huggingface_model,
scan_numpy,
scan_pytorch,
)
try:
import torch
import torch._inductor.codecache as codecache
except ImportError:
# If PyTorch test files need to be regenerated, run 'pip install torch==2.6.0' first
torch = None
codecache = None
_root_path = os.path.dirname(__file__)
class Malicious1:
def __reduce__(self):
return eval, ("print('456')",)
class Malicious2:
def __reduce__(self):
return os.system, ("ls -la",)
class Malicious3:
def __reduce__(self):
return http.client.HTTPSConnection, ("github.com",)
malicious3_pickle_bytes = pickle.dumps(
Malicious3(), protocol=0
) # Malicious3 needs to be pickled before HTTPSConnection is mocked below
class Malicious4:
def __reduce__(self):
return requests.get, ("https://github.com",)
class Malicious5:
def __reduce__(self):
return aiohttp.ClientSession, tuple()
class Malicious6:
def __reduce__(self):
return socket.create_connection, (("github.com", 80),)
class Malicious7:
def __reduce__(self):
return subprocess.run, (["ls", "-l"],)
class Malicious8:
def __reduce__(self):
return sys.exit, (0,)
class Malicious13:
def __reduce__(self):
return pickle.loads, (b"I12345\n.",) # Loads the value 12345
class Malicious14:
def __reduce__(self):
return runpy._run_code, ("print('456')",)
class Malicious15:
def __reduce__(self):
bd = bdb.Bdb()
return bdb.Bdb.run, (
bd,
'import os\nos.system("whoami")',
)
class Malicious17:
def __reduce__(self):
return codecache.compile_file, ("", "", ["sh", "-c", '$(echo "pwned")'])
class Malicious18:
def __reduce__(self):
return pydoc.pipepager, ("", 'echo "pwned"')
class Malicious19:
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __reduce__(self):
return partial(torch.load, self.path, **self.kwargs), ()
class Malicious20:
def __reduce__(self):
return venv.create, ("venv", False, False, True, False, "$(echo pwned)")
class Malicious16:
def __reduce__(self):
return pip.main, (
[
"install",
"some_malicious_package",
"--no-input",
"-q",
"-q",
"-q",
"--exists-action",
"i",
"--isolated",
],
)
class HTTPResponse:
def __init__(self, status, data=None):
self.status = status
self.reason = "mock reason"
self.data = data
def read(self):
return self.data
class MockHTTPSConnection:
def __init__(self, host):
self.host = host
self.response = None
def request(self, method, path_and_query):
assert self.response is None
target = f"{method} https://{self.host}{path_and_query}"
if target == "GET https://localhost/mock/200":
self.response = HTTPResponse(200, b"mock123")
elif target == "GET https://localhost/mock/400":
self.response = HTTPResponse(400)
elif target == "GET https://localhost/mock/pickle/benign":
self.response = HTTPResponse(200, pickle.dumps({"a": 0, "b": 1, "c": 2}))
elif target == "GET https://localhost/mock/pickle/malicious":
self.response = HTTPResponse(200, pickle.dumps(Malicious2()))
elif target == "GET https://localhost/mock/zip/benign":
buffer = io.BytesIO()
with zipfile.ZipFile(buffer, "w") as zip:
zip.writestr("data.pkl", pickle.dumps({"a": 0, "b": 1, "c": 2}))
self.response = HTTPResponse(200, buffer.getbuffer())
elif target == "GET https://localhost/mock/zip/malicious":
buffer = io.BytesIO()
with zipfile.ZipFile(buffer, "w") as zip:
zip.writestr("data.pkl", pickle.dumps(Malicious1()))
self.response = HTTPResponse(200, buffer.getbuffer())
elif (
target
== "GET https://huggingface.co/api/models/ykilcher/totally-harmless-model"
):
self.response = HTTPResponse(
200, b'{"siblings": [{"rfilename": "pytorch_model.bin"}]}'
)
elif (
target
== "GET https://huggingface.co/ykilcher/totally-harmless-model/resolve/main/pytorch_model.bin"
):
buffer = io.BytesIO()
with zipfile.ZipFile(buffer, "w") as zip:
zip.writestr("archive/data.pkl", pickle.dumps(Malicious1()))
self.response = HTTPResponse(200, buffer.getbuffer())
else:
raise ValueError(f"No mock for request '{target}'")
def getresponse(self):
response = self.response
self.response = None
return response
def close(self):
pass
http.client.HTTPSConnection = MockHTTPSConnection
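# From this point on, any HTTPS request issued through http.client (as the scanner's
# URL helpers do) is answered by MockHTTPSConnection instead of the real network.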
def initialize_pickle_file(path, obj, version):
if not os.path.exists(path):
with open(path, "wb") as file:
pickle.dump(obj, file, protocol=version)
def initialize_data_file(path, data):
if not os.path.exists(path):
with open(path, "wb") as file:
file.write(data)
def initialize_7z_file(archive_path, file_name):
file_path = f"{_root_path}/data/malicious1.pkl"
with open(file_path, "wb") as f:
pickle.dump(Malicious1(), f, protocol=4)
if not os.path.exists(archive_path):
with py7zr.SevenZipFile(archive_path, "w") as archive:
archive.write(file_path, file_name)
pathlib.Path.unlink(pathlib.Path(file_path))
def initialize_zip_file(path, file_name, data):
if not os.path.exists(path):
with zipfile.ZipFile(path, "w") as zip:
zip.writestr(file_name, data)
def initialize_numpy_files():
import numpy as np
os.makedirs(f"{_root_path}/data2", exist_ok=True)
path = f"{_root_path}/data2/object_array.npy"
if not os.path.exists(path):
x = np.empty((2, 2), dtype=object)
x[:] = [(1, 2), (3, 4)]
np.save(path, x)
path = f"{_root_path}/data2/int_array.npy"
if not os.path.exists(path):
x = np.empty((2, 2), dtype=int)
x[:] = [(1, 2), (3, 4)]
np.save(path, x)
path = f"{_root_path}/data2/object_arrays.npz"
if not os.path.exists(path):
np.savez(
path,
a=np.array([0, 1, 2], dtype=object),
b=np.array([3, 4, 5], dtype=object),
)
path = f"{_root_path}/data2/int_arrays.npz"
if not os.path.exists(path):
np.savez(
path, a=np.array([0, 1, 2], dtype=int), b=np.array([3, 4, 5], dtype=int)
)
path = f"{_root_path}/data2/object_arrays_compressed.npz"
if not os.path.exists(path):
np.savez_compressed(
path,
a=np.array([0, 1, 2], dtype=object),
b=np.array([3, 4, 5], dtype=object),
)
path = f"{_root_path}/data2/int_arrays_compressed.npz"
if not os.path.exists(path):
np.savez_compressed(
path, a=np.array([0, 1, 2], dtype=int), b=np.array([3, 4, 5], dtype=int)
)
def initialize_pickle_files():
os.makedirs(f"{_root_path}/data", exist_ok=True)
# Test with Pickle versions 0, 3, and 4:
# - Pickle versions 0, 1, 2 have built-in functions under '__builtin__' while versions 3 and 4 have them under 'builtins'
# - Pickle versions 0, 1, 2, 3 use 'GLOBAL' opcode while 4 uses 'STACK_GLOBAL' opcode
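# (Roughly: protocol 0 emits a text opcode such as "c__builtin__\neval\n", while
# protocol 4 pushes the module and name as strings and combines them with STACK_GLOBAL.)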
for version in (0, 3, 4):
initialize_pickle_file(
f"{_root_path}/data/benign0_v{version}.pkl", ["a", "b", "c"], version
)
initialize_pickle_file(
f"{_root_path}/data/malicious1_v{version}.pkl", Malicious1(), version
)
initialize_pickle_file(
f"{_root_path}/data/malicious2_v{version}.pkl", Malicious2(), version
)
# Malicious Pickle from https://sensepost.com/cms/resources/conferences/2011/sour_pickles/BH_US_11_Slaviero_Sour_Pickles.pdf
initialize_data_file(
f"{_root_path}/data/malicious0.pkl",
b'c__builtin__\nglobals\n(tRp100\n0c__builtin__\ncompile\n(S\'fl=open("/etc/passwd");picklesmashed=fl.read();'
+ b"'\nS''\nS'exec'\ntRp101\n0c__builtin__\neval\n(g101\ng100\ntRp102\n0c__builtin__\ngetattr\n(c__builtin__\n"
+ b"dict\nS'get'\ntRp103\n0c__builtin__\napply\n(g103\n(g100\nS'picklesmashed'\nltRp104\n0g104\n.",
)
# Malicious Pickle with an INST opcode
# 0: ( MARK
# 1: S STRING 'raise RuntimeError("Injection running")'
# 44: i INST '__builtin__ exec' (MARK at 0)
# 62: . STOP
initialize_data_file(
f"{_root_path}/data/malicious10.pkl",
b"(S'raise RuntimeError(\"Injection running\")'\ni__builtin__\nexec\n.",
)
# Malicious Pickle from Capture-the-Flag challenge 'Misc/Safe Pickle' at https://imaginaryctf.org/Challenges
# GitHub Issue: https://github.com/mmaitre314/picklescan/issues/22
initialize_data_file(
f"{_root_path}/data/malicious11.pkl",
b"".join(
[
pickle.UNICODE + b"os\n",
pickle.PUT + b"2\n",
pickle.POP,
pickle.UNICODE + b"system\n",
pickle.PUT + b"3\n",
pickle.POP,
pickle.UNICODE + b"torch\n",
pickle.PUT + b"0\n",
pickle.POP,
pickle.UNICODE + b"LongStorage\n",
pickle.PUT + b"1\n",
pickle.POP,
pickle.GET + b"2\n",
pickle.GET + b"3\n",
pickle.STACK_GLOBAL,
pickle.MARK,
pickle.UNICODE + b"cat flag.txt\n",
pickle.TUPLE,
pickle.REDUCE,
pickle.STOP,
]
),
)
initialize_data_file(
f"{_root_path}/data/malicious-invalid-bytes.pkl",
b"".join(
[
pickle.UNICODE + b"os\n",
pickle.PUT + b"2\n",
pickle.POP,
pickle.UNICODE + b"system\n",
pickle.PUT + b"3\n",
pickle.POP,
pickle.UNICODE + b"torch\n",
pickle.PUT + b"0\n",
pickle.POP,
pickle.UNICODE + b"LongStorage\n",
pickle.PUT + b"1\n",
pickle.POP,
pickle.GET + b"2\n",
pickle.GET + b"3\n",
pickle.STACK_GLOBAL,
pickle.MARK,
pickle.UNICODE + b"cat flag.txt\n",
pickle.TUPLE,
pickle.REDUCE,
pickle.STOP,
b"\n\n\t\t",
]
),
)
# Broken model
initialize_data_file(
f"{_root_path}/data/broken_model.pkl",
b"cbuiltins\nexec\n(X>\nf = open('my_file.txt', 'a'); f.write('Malicious'); f.close()tRX.",
)
# Code which created malicious12.pkl using pickleassem (see https://github.com/gousaiyang/pickleassem)
#
# p = PickleAssembler(proto=4)
#
# # get operator.attrgetter onto stack
# p.push_short_binunicode("operator")
# p.memo_memoize()
# p.push_short_binunicode("attrgetter")
# p.memo_memoize()
# p.build_stack_global()
# p.memo_memoize()
#
# # get operator.attrgetter("system") onto stack
# p.push_short_binunicode("system")
# p.memo_memoize()
# p.build_tuple1()
# p.memo_memoize()
# p.build_reduce()
# p.memo_memoize()
#
# # get os module onto stack
# p.push_short_binunicode("builtins")
# p.memo_memoize()
# p.push_short_binunicode("__import__")
# p.memo_memoize()
# p.build_stack_global()
# p.memo_memoize()
# p.push_short_binunicode("os")
# p.memo_memoize()
# p.build_tuple1()
# p.memo_memoize()
# p.build_reduce()
# p.memo_memoize()
#
# # get os.system onto stack
# p.build_tuple1()
# p.memo_memoize()
# p.build_reduce()
# p.memo_memoize()
#
# # call os.system("echo pwned")
# p.push_short_binunicode("echo pwned")
# p.memo_memoize()
# p.build_tuple1()
# p.memo_memoize()
# p.build_reduce()
# p.memo_memoize()
initialize_data_file(f"{_root_path}/data/malicious3.pkl", malicious3_pickle_bytes)
initialize_pickle_file(f"{_root_path}/data/malicious4.pickle", Malicious4(), 4)
initialize_pickle_file(f"{_root_path}/data/malicious5.pickle", Malicious5(), 4)
initialize_data_file(
f"{_root_path}/data/malicious6.pkl",
pickle.dumps(["a", "b", "c"]) + pickle.dumps(Malicious4()),
)
initialize_pickle_file(f"{_root_path}/data/malicious7.pkl", Malicious6(), 4)
initialize_pickle_file(f"{_root_path}/data/malicious8.pkl", Malicious7(), 4)
initialize_pickle_file(f"{_root_path}/data/malicious9.pkl", Malicious8(), 4)
initialize_pickle_file(
f"{_root_path}/data/malicious13a.pkl", Malicious13(), 0
) # pickle module serialized as cpickle
initialize_pickle_file(
f"{_root_path}/data/malicious13b.pkl", Malicious13(), 4
) # pickle module serialized as _pickle
initialize_pickle_file(
f"{_root_path}/data/malicious14.pkl", Malicious14(), 4
) # runpy
initialize_pickle_file(f"{_root_path}/data/malicious15a.pkl", Malicious15(), 2)
initialize_pickle_file(f"{_root_path}/data/malicious15b.pkl", Malicious15(), 4)
initialize_pickle_file(f"{_root_path}/data/malicious16.pkl", Malicious16(), 0)
initialize_pickle_file(f"{_root_path}/data/malicious17.pkl", Malicious17(), 4)
initialize_pickle_file(f"{_root_path}/data/malicious18.pkl", Malicious18(), 4)
# This exploit serializes kwargs and passes them into a torch.load call
initialize_pickle_file(
f"{_root_path}/data/malicious19.pkl",
Malicious19(
"some_other_model.bin", pickle_file="config.json", weights_only=False
),
4,
)
initialize_pickle_file(f"{_root_path}/data/malicious20.pkl", Malicious20(), 4)
initialize_7z_file(
f"{_root_path}/data/malicious1.7z",
"data.pkl",
)
initialize_zip_file(
f"{_root_path}/data/malicious1.zip",
"data.pkl",
pickle.dumps(Malicious1(), protocol=4),
)
initialize_zip_file(
f"{_root_path}/data/malicious1_wrong_ext.zip",
"data.txt", # Pickle file with a non-standard extension
pickle.dumps(Malicious1(), protocol=4),
)
# Fake PyTorch file (PNG file format) simulating https://huggingface.co/RectalWorm/loras_new/blob/main/Owl_Mage_no_background.pt
initialize_data_file(f"{_root_path}/data/bad_pytorch.pt", b"\211PNG\r\n\032\n")
initialize_pickle_files()
initialize_numpy_files()
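# Compares two ScanResults while ignoring the order of the reported globals
# (assertCountEqual treats the two lists as unordered multisets).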
def compare_scan_results(sr1: ScanResult, sr2: ScanResult):
test_case = TestCase()
assert sr1.scanned_files == sr2.scanned_files
assert sr1.issues_count == sr2.issues_count
assert sr1.infected_files == sr2.infected_files
test_case.assertCountEqual(sr1.globals, sr2.globals)
def test_http_get():
assert _http_get("https://localhost/mock/200") == b"mock123"
with pytest.raises(RuntimeError):
_http_get("https://localhost/mock/400")
def test_list_globals():
assert _list_globals(io.BytesIO(pickle.dumps(Malicious1()))) == {
("builtins", "eval")
}
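# ScanResult is constructed positionally below as (globals, scanned_files, issues_count,
# infected_files[, scan_err]), matching the keyword form used in test_scan_directory_path.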
def test_scan_pickle_bytes():
assert scan_pickle_bytes(
io.BytesIO(pickle.dumps(Malicious1())), "file.pkl"
) == ScanResult([Global("builtins", "eval", SafetyLevel.Dangerous)], 1, 1, 1)
def test_scan_zip_bytes():
buffer = io.BytesIO()
with zipfile.ZipFile(buffer, "w") as zip:
zip.writestr("data.pkl", pickle.dumps(Malicious1()))
assert scan_zip_bytes(io.BytesIO(buffer.getbuffer()), "test.zip") == ScanResult(
[Global("builtins", "eval", SafetyLevel.Dangerous)], 1, 1, 1
)
def test_scan_numpy():
with open(f"{_root_path}/data2/object_array.npy", "rb") as f:
compare_scan_results(
scan_numpy(io.BytesIO(f.read()), "object_array.npy"),
ScanResult(
[
Global(
"numpy.core.multiarray", "_reconstruct", SafetyLevel.Innocuous
),
Global("numpy", "ndarray", SafetyLevel.Innocuous),
Global("numpy", "dtype", SafetyLevel.Innocuous),
],
1,
0,
0,
),
)
with open(f"{_root_path}/data2/int_array.npy", "rb") as f:
compare_scan_results(
scan_numpy(io.BytesIO(f.read()), "int_array.npy"),
ScanResult(
[],
1,
0,
0,
),
)
def test_scan_pytorch():
scan_result = ScanResult(
[
Global("torch", "FloatStorage", SafetyLevel.Innocuous),
Global("collections", "OrderedDict", SafetyLevel.Innocuous),
Global("torch._utils", "_rebuild_tensor_v2", SafetyLevel.Innocuous),
],
1,
0,
0,
)
with open(f"{_root_path}/data/pytorch_model.bin", "rb") as f:
compare_scan_results(
scan_pytorch(io.BytesIO(f.read()), "pytorch_model.bin"), scan_result
)
with open(f"{_root_path}/data/new_pytorch_model.bin", "rb") as f:
compare_scan_results(
scan_pytorch(io.BytesIO(f.read()), "pytorch_model.bin"), scan_result
)
def test_scan_file_path():
safe = ScanResult([], 1, 0, 0)
compare_scan_results(scan_file_path(f"{_root_path}/data/benign0_v3.pkl"), safe)
pytorch = ScanResult(
[
Global("torch", "FloatStorage", SafetyLevel.Innocuous),
Global("collections", "OrderedDict", SafetyLevel.Innocuous),
Global("torch._utils", "_rebuild_tensor_v2", SafetyLevel.Innocuous),
],
1,
0,
0,
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/pytorch_model.bin"), pytorch
)
malicious0 = ScanResult(
[
Global("__builtin__", "compile", SafetyLevel.Dangerous),
Global("__builtin__", "globals", SafetyLevel.Suspicious),
Global("__builtin__", "dict", SafetyLevel.Suspicious),
Global("__builtin__", "apply", SafetyLevel.Dangerous),
Global("__builtin__", "getattr", SafetyLevel.Dangerous),
Global("__builtin__", "eval", SafetyLevel.Dangerous),
],
1,
4,
1,
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious0.pkl"), malicious0
)
malicious1_v0 = ScanResult(
[Global("__builtin__", "eval", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious1_v0.pkl"), malicious1_v0
)
malicious1 = ScanResult(
[Global("builtins", "eval", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious1_v3.pkl"), malicious1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious1_v4.pkl"), malicious1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious1.zip"), malicious1
)
compare_scan_results(scan_file_path(f"{_root_path}/data/malicious1.7z"), malicious1)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious1_wrong_ext.zip"), malicious1
)
malicious2 = ScanResult([Global("posix", "system", SafetyLevel.Dangerous)], 1, 1, 1)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious2_v0.pkl"), malicious2
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious2_v3.pkl"), malicious2
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious2_v4.pkl"), malicious2
)
malicious3 = ScanResult(
[Global("httplib", "HTTPSConnection", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious3.pkl"), malicious3
)
malicious4 = ScanResult(
[Global("requests.api", "get", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious4.pickle"), malicious4
)
malicious5 = ScanResult(
[Global("aiohttp.client", "ClientSession", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious5.pickle"), malicious5
)
malicious6 = ScanResult(
[Global("requests.api", "get", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious6.pkl"), malicious6
)
malicious7 = ScanResult(
[Global("socket", "create_connection", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious7.pkl"), malicious7
)
malicious8 = ScanResult(
[Global("subprocess", "run", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious8.pkl"), malicious8
)
malicious9 = ScanResult([Global("sys", "exit", SafetyLevel.Dangerous)], 1, 1, 1)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious9.pkl"), malicious9
)
malicious10 = ScanResult(
[Global("__builtin__", "exec", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious10.pkl"), malicious10
)
bad_pytorch = ScanResult([], 0, 0, 0, True)
compare_scan_results(
scan_file_path(f"{_root_path}/data/bad_pytorch.pt"), bad_pytorch
)
malicious14 = ScanResult(
[Global("runpy", "_run_code", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_file_path(f"{_root_path}/data/malicious14.pkl"), malicious14
)
def test_scan_file_path_npz():
compare_scan_results(
scan_file_path(f"{_root_path}/data2/object_arrays.npz"),
ScanResult(
[
Global("numpy.core.multiarray", "_reconstruct", SafetyLevel.Innocuous),
Global("numpy", "ndarray", SafetyLevel.Innocuous),
Global("numpy", "dtype", SafetyLevel.Innocuous),
]
* 2,
2,
0,
0,
),
)
compare_scan_results(
scan_file_path(f"{_root_path}/data2/int_arrays.npz"),
ScanResult(
[],
2,
0,
0,
),
)
compare_scan_results(
scan_file_path(f"{_root_path}/data2/object_arrays_compressed.npz"),
ScanResult(
[
Global("numpy.core.multiarray", "_reconstruct", SafetyLevel.Innocuous),
Global("numpy", "ndarray", SafetyLevel.Innocuous),
Global("numpy", "dtype", SafetyLevel.Innocuous),
]
* 2,
2,
0,
0,
),
)
compare_scan_results(
scan_file_path(f"{_root_path}/data2/int_arrays_compressed.npz"),
ScanResult(
[],
2,
0,
0,
),
)
def test_scan_directory_path():
sr = ScanResult(
globals=[
Global("builtins", "eval", SafetyLevel.Dangerous),
Global("httplib", "HTTPSConnection", SafetyLevel.Dangerous),
Global("collections", "OrderedDict", SafetyLevel.Innocuous),
Global("torch._utils", "_rebuild_tensor_v2", SafetyLevel.Innocuous),
Global("torch", "FloatStorage", SafetyLevel.Innocuous),
Global("subprocess", "run", SafetyLevel.Dangerous),
Global("posix", "system", SafetyLevel.Dangerous),
Global("posix", "system", SafetyLevel.Dangerous),
Global("requests.api", "get", SafetyLevel.Dangerous),
Global("posix", "system", SafetyLevel.Dangerous),
Global("aiohttp.client", "ClientSession", SafetyLevel.Dangerous),
Global("__builtin__", "eval", SafetyLevel.Dangerous),
Global("sys", "exit", SafetyLevel.Dangerous),
Global("__builtin__", "eval", SafetyLevel.Dangerous),
Global("__builtin__", "compile", SafetyLevel.Dangerous),
Global("__builtin__", "dict", SafetyLevel.Suspicious),
Global("__builtin__", "apply", SafetyLevel.Dangerous),
Global("__builtin__", "getattr", SafetyLevel.Dangerous),
Global("__builtin__", "getattr", SafetyLevel.Dangerous),
Global("__builtin__", "globals", SafetyLevel.Suspicious),
Global("requests.api", "get", SafetyLevel.Dangerous),
Global("builtins", "eval", SafetyLevel.Dangerous),
Global("builtins", "eval", SafetyLevel.Dangerous),
Global("runpy", "_run_code", SafetyLevel.Dangerous),
Global("socket", "create_connection", SafetyLevel.Dangerous),
Global("collections", "OrderedDict", SafetyLevel.Innocuous),
Global("torch._utils", "_rebuild_tensor_v2", SafetyLevel.Innocuous),
Global("torch", "FloatStorage", SafetyLevel.Innocuous),
Global("_rebuild_tensor", "unknown", SafetyLevel.Dangerous),
Global("torch._utils", "_rebuild_tensor", SafetyLevel.Suspicious),
Global("torch", "_utils", SafetyLevel.Suspicious),
Global("__builtin__", "exec", SafetyLevel.Dangerous),
Global("os", "system", SafetyLevel.Dangerous),
Global("os", "system", SafetyLevel.Dangerous),
Global("operator", "attrgetter", SafetyLevel.Dangerous),
Global("builtins", "__import__", SafetyLevel.Suspicious),
Global("pickle", "loads", SafetyLevel.Dangerous),
Global("_pickle", "loads", SafetyLevel.Dangerous),
Global("_codecs", "encode", SafetyLevel.Suspicious),
Global("bdb", "Bdb", SafetyLevel.Dangerous),
Global("bdb", "Bdb", SafetyLevel.Dangerous),
Global("bdb", "Bdb.run", SafetyLevel.Dangerous),
Global("builtins", "exec", SafetyLevel.Dangerous),
Global("builtins", "eval", SafetyLevel.Dangerous),
Global("venv", "create", SafetyLevel.Dangerous),
Global("torch._inductor.codecache", "compile_file", SafetyLevel.Dangerous),
Global("pydoc", "pipepager", SafetyLevel.Dangerous),
Global("torch.serialization", "load", SafetyLevel.Dangerous),
Global("functools", "partial", SafetyLevel.Dangerous),
Global("pip", "main", SafetyLevel.Dangerous),
Global("builtins", "eval", SafetyLevel.Dangerous),
],
scanned_files=38,
issues_count=39,
infected_files=33,
scan_err=True,
)
compare_scan_results(scan_directory_path(f"{_root_path}/data/"), sr)
def test_scan_url():
safe = ScanResult([], 1, 0, 0)
compare_scan_results(scan_url("https://localhost/mock/pickle/benign"), safe)
compare_scan_results(scan_url("https://localhost/mock/zip/benign"), safe)
malicious = ScanResult([Global(os.name, "system", SafetyLevel.Dangerous)], 1, 1, 1)
compare_scan_results(scan_url("https://localhost/mock/pickle/malicious"), malicious)
malicious_zip = ScanResult(
[Global("builtins", "eval", SafetyLevel.Dangerous)], 1, 1, 1
)
compare_scan_results(
scan_url("https://localhost/mock/zip/malicious"), malicious_zip
)
def test_scan_huggingface_model():
eval_sr = ScanResult([Global("builtins", "eval", SafetyLevel.Dangerous)], 1, 1, 1)
compare_scan_results(
scan_huggingface_model("ykilcher/totally-harmless-model"), eval_sr
)
def test_main():
argv = sys.argv
try:
sys.argv = ["picklescan", "-u", "https://localhost/mock/pickle/benign"]
assert main() == 0
importlib.import_module("picklescan.__main__")
finally:
sys.argv = argv
def test_pickle_files():
with open(f"{_root_path}/data/malicious13a.pkl", "rb") as file:
assert pickle.load(file) == 12345
with open(f"{_root_path}/data/malicious13b.pkl", "rb") as file:
assert pickle.load(file) == 12345
def test_invalid_bytes_err():
malicious_invalid_bytes = ScanResult(
[Global("os", "system", SafetyLevel.Dangerous)], 1, 1, 1, True
)
with open(f"{_root_path}/data/malicious-invalid-bytes.pkl", "rb") as file:
compare_scan_results(
scan_pickle_bytes(file, f"{_root_path}/data/malicious-invalid-bytes.pkl"),
malicious_invalid_bytes,
)
|
GHSA-2fh4-gpch-vqv4
|
src/fides/api/service/privacy_request/dsr_package/dsr_report_builder.py
|
@@ -45,7 +45,9 @@ def pretty_print(value: str, indent: int = 4) -> str:
return json.dumps(value, indent=indent, default=storage_json_encoder)
jinja2.filters.FILTERS["pretty_print"] = pretty_print
- self.template_loader = Environment(loader=FileSystemLoader(DSR_DIRECTORY))
+ self.template_loader = Environment(
+ loader=FileSystemLoader(DSR_DIRECTORY), autoescape=True
+ )
# to pass in custom colors in the future
self.template_data: Dict[str, Any] = {
@@ -75,7 +77,8 @@ def _populate_template(
}
report_data.update(self.template_data)
template = self.template_loader.get_template(template_path)
- return template.render(report_data)
+ rendered_template = template.render(report_data)
+ return rendered_template
def _add_file(self, filename: str, contents: str) -> None:
"""Helper to add a file to the zip archive"""
|
import json
import os
import zipfile
from collections import defaultdict
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Optional
import jinja2
from jinja2 import Environment, FileSystemLoader
from fides.api.models.privacy_request import PrivacyRequest
from fides.api.schemas.policy import ActionType
from fides.api.schemas.redis_cache import Identity
from fides.api.util.storage_util import storage_json_encoder
DSR_DIRECTORY = Path(__file__).parent.resolve()
TEXT_COLOR = "#4A5568"
HEADER_COLOR = "#F7FAFC"
BORDER_COLOR = "#E2E8F0"
# pylint: disable=too-many-instance-attributes
class DsrReportBuilder:
def __init__(
self,
privacy_request: PrivacyRequest,
dsr_data: Dict[str, Any],
):
"""
Manages populating HTML templates from the given data and adding the generated
pages to a zip file in a way that allows navigation between the pages.
"""
# zip file variables
self.baos = BytesIO()
# we close this in the finally block of generate()
# pylint: disable=consider-using-with
self.out = zipfile.ZipFile(self.baos, "w")
# Jinja template environment initialization
def pretty_print(value: str, indent: int = 4) -> str:
return json.dumps(value, indent=indent, default=storage_json_encoder)
jinja2.filters.FILTERS["pretty_print"] = pretty_print
self.template_loader = Environment(loader=FileSystemLoader(DSR_DIRECTORY))
# to pass in custom colors in the future
self.template_data: Dict[str, Any] = {
"text_color": TEXT_COLOR,
"header_color": HEADER_COLOR,
"border_color": BORDER_COLOR,
}
self.main_links: Dict[str, Any] = {} # used to track the generated pages
# report data to populate the templates
self.request_data = _map_privacy_request(privacy_request)
self.dsr_data = dsr_data
def _populate_template(
self,
template_path: str,
heading: Optional[str] = None,
description: Optional[str] = None,
data: Optional[Dict[str, Any]] = None,
) -> str:
"""Generates a file from the template and data"""
report_data = {
"heading": heading,
"description": description,
"data": data,
"request": self.request_data,
}
report_data.update(self.template_data)
template = self.template_loader.get_template(template_path)
return template.render(report_data)
def _add_file(self, filename: str, contents: str) -> None:
"""Helper to add a file to the zip archive"""
if filename and contents:
self.out.writestr(f"{filename}", contents.encode("utf-8"))
def _add_dataset(self, dataset_name: str, collections: Dict[str, Any]) -> None:
"""
Generates a page for each collection in the dataset and an index page for the dataset.
Tracks the generated links to build a root level index after each collection has been processed.
"""
# track links to collection indexes
collection_links = {}
for collection_name, rows in collections.items():
collection_url = f"{collection_name}/index.html"
self._add_collection(rows, dataset_name, collection_name)
collection_links[collection_name] = collection_url
# generate dataset index page
self._add_file(
f"/data/{dataset_name}/index.html",
self._populate_template(
"templates/dataset_index.html",
dataset_name,
None,
collection_links,
),
)
def _add_collection(
self, rows: List[Dict[str, Any]], dataset_name: str, collection_name: str
) -> None:
# track links to detail pages
detail_links = {}
for index, item in enumerate(rows, 1):
detail_url = f"{index}.html"
self._add_file(
f"/data/{dataset_name}/{collection_name}/{index}.html",
self._populate_template(
"templates/item.html",
f"{collection_name} (item #{index})",
None,
item,
),
)
detail_links[f"item #{index}"] = detail_url
# generate detail index page
self._add_file(
f"/data/{dataset_name}/{collection_name}/index.html",
self._populate_template(
"templates/collection_index.html",
collection_name,
None,
detail_links,
),
)
def generate(self) -> BytesIO:
"""
Processes the request and DSR data to build a zip file containing the DSR report.
Returns the zip file as an in-memory byte array.
"""
try:
# all the css for the pages is in main.css
self._add_file(
"/data/main.css",
self._populate_template("templates/main.css"),
)
self._add_file(
"/data/back.svg",
Path(os.path.join(DSR_DIRECTORY, "assets/back.svg")).read_text(
encoding="utf-8"
),
)
# pre-process data to split the dataset:collection keys
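# e.g. a key like "postgres:users" becomes dataset "postgres" / collection "users";
# keys without a ":" are grouped under the "manual" dataset.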
datasets: Dict[str, Any] = defaultdict(lambda: defaultdict(list))
for key, rows in self.dsr_data.items():
parts = key.split(":", 1)
dataset_name, collection_name = (
parts if len(parts) > 1 else ("manual", parts[0])
)
datasets[dataset_name][collection_name].extend(rows)
for dataset_name, collections in datasets.items():
self._add_dataset(dataset_name, collections)
self.main_links[dataset_name] = f"data/{dataset_name}/index.html"
# create the main index once all the datasets have been added
self._add_file(
"/welcome.html",
self._populate_template(
"templates/welcome.html", "DSR Report", None, self.main_links
),
)
finally:
# close out the zip file in the finally block so it always gets closed, even when an exception occurs
self.out.close()
# reset the file pointer so the file can be fully read by the caller
self.baos.seek(0)
return self.baos
def _map_privacy_request(privacy_request: PrivacyRequest) -> Dict[str, Any]:
"""Creates a map with a subset of values from the privacy request"""
request_data = {}
request_data["id"] = privacy_request.id
action_type: Optional[ActionType] = privacy_request.policy.get_action_type()
if action_type:
request_data["type"] = action_type.value
identity: Identity = privacy_request.get_persisted_identity()
if identity.email:
request_data["email"] = identity.email
if privacy_request.requested_at:
request_data["requested_at"] = privacy_request.requested_at.strftime(
"%m/%d/%Y %H:%M %Z"
)
return request_data
|
GHSA-3vpf-mcj7-5h38
|
bson/__init__.py
|
@@ -150,7 +150,7 @@ def _get_object(data, position, as_class, tz_aware, uuid_subtype):
object = _elements_to_dict(encoded, as_class, tz_aware, uuid_subtype)
position += obj_size
if "$ref" in object:
- return (DBRef(object.pop("$ref"), object.pop("$id"),
+ return (DBRef(object.pop("$ref"), object.pop("$id", None),
object.pop("$db", None), object), position)
return object, position
|
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BSON (Binary JSON) encoding and decoding.
"""
import calendar
import datetime
import re
import struct
import sys
from bson.binary import (Binary, OLD_UUID_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY)
from bson.code import Code
from bson.dbref import DBRef
from bson.errors import (InvalidBSON,
InvalidDocument,
InvalidStringData)
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.py3compat import b, binary_type
from bson.son import SON, RE_TYPE
from bson.timestamp import Timestamp
from bson.tz_util import utc
try:
from bson import _cbson
_use_c = True
except ImportError:
_use_c = False
try:
import uuid
_use_uuid = True
except ImportError:
_use_uuid = False
PY3 = sys.version_info[0] == 3
MAX_INT32 = 2147483647
MIN_INT32 = -2147483648
MAX_INT64 = 9223372036854775807
MIN_INT64 = -9223372036854775808
EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc)
EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0)
# Create constants compatible with all versions of
# python from 2.4 forward. In 2.x b("foo") is just
# "foo". In 3.x it becomes b"foo".
EMPTY = b("")
ZERO = b("\x00")
ONE = b("\x01")
BSONNUM = b("\x01") # Floating point
BSONSTR = b("\x02") # UTF-8 string
BSONOBJ = b("\x03") # Embedded document
BSONARR = b("\x04") # Array
BSONBIN = b("\x05") # Binary
BSONUND = b("\x06") # Undefined
BSONOID = b("\x07") # ObjectId
BSONBOO = b("\x08") # Boolean
BSONDAT = b("\x09") # UTC Datetime
BSONNUL = b("\x0A") # Null
BSONRGX = b("\x0B") # Regex
BSONREF = b("\x0C") # DBRef
BSONCOD = b("\x0D") # Javascript code
BSONSYM = b("\x0E") # Symbol
BSONCWS = b("\x0F") # Javascript code with scope
BSONINT = b("\x10") # 32bit int
BSONTIM = b("\x11") # Timestamp
BSONLON = b("\x12") # 64bit int
BSONMIN = b("\xFF") # Min key
BSONMAX = b("\x7F") # Max key
def _get_int(data, position, as_class=None,
tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE, unsigned=False):
format = unsigned and "I" or "i"
try:
value = struct.unpack("<%s" % format, data[position:position + 4])[0]
except struct.error:
raise InvalidBSON()
position += 4
return value, position
def _get_c_string(data, position, length=None):
if length is None:
try:
end = data.index(ZERO, position)
except ValueError:
raise InvalidBSON()
else:
end = position + length
value = data[position:end].decode("utf-8")
position = end + 1
return value, position
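# BSON "C strings" are NUL-terminated UTF-8, which is why keys and regex patterns must
# not contain an embedded NUL byte (checked in _make_c_string below).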
def _make_c_string(string, check_null=False):
if isinstance(string, unicode):
if check_null and "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NULL character")
return string.encode("utf-8") + ZERO
else:
if check_null and ZERO in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NULL character")
try:
string.decode("utf-8")
return string + ZERO
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
def _get_number(data, position, as_class, tz_aware, uuid_subtype):
num = struct.unpack("<d", data[position:position + 8])[0]
position += 8
return num, position
def _get_string(data, position, as_class, tz_aware, uuid_subtype):
length = struct.unpack("<i", data[position:position + 4])[0] - 1
position += 4
return _get_c_string(data, position, length)
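# Note: subdocuments containing "$ref" are decoded as DBRef instances. Here "$id" is
# popped without a default, so a document carrying "$ref" but no "$id" raises KeyError
# during decoding; the accompanying patch passes None as the default instead.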
def _get_object(data, position, as_class, tz_aware, uuid_subtype):
obj_size = struct.unpack("<i", data[position:position + 4])[0]
encoded = data[position + 4:position + obj_size - 1]
object = _elements_to_dict(encoded, as_class, tz_aware, uuid_subtype)
position += obj_size
if "$ref" in object:
return (DBRef(object.pop("$ref"), object.pop("$id"),
object.pop("$db", None), object), position)
return object, position
def _get_array(data, position, as_class, tz_aware, uuid_subtype):
obj, position = _get_object(data, position,
as_class, tz_aware, uuid_subtype)
result = []
i = 0
while True:
try:
result.append(obj[str(i)])
i += 1
except KeyError:
break
return result, position
def _get_binary(data, position, as_class, tz_aware, uuid_subtype):
length, position = _get_int(data, position)
subtype = ord(data[position:position + 1])
position += 1
if subtype == 2:
length2, position = _get_int(data, position)
if length2 != length - 4:
raise InvalidBSON("invalid binary (st 2) - lengths don't match!")
length = length2
if subtype in (3, 4) and _use_uuid:
# Java Legacy
if uuid_subtype == JAVA_LEGACY:
java = data[position:position + length]
value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1])
# C# legacy
elif uuid_subtype == CSHARP_LEGACY:
value = uuid.UUID(bytes_le=data[position:position + length])
# Python
else:
value = uuid.UUID(bytes=data[position:position + length])
position += length
return (value, position)
# Python3 special case. Decode subtype 0 to 'bytes'.
if PY3 and subtype == 0:
value = data[position:position + length]
else:
value = Binary(data[position:position + length], subtype)
position += length
return value, position
def _get_oid(data, position, as_class=None,
tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE):
value = ObjectId(data[position:position + 12])
position += 12
return value, position
def _get_boolean(data, position, as_class, tz_aware, uuid_subtype):
value = data[position:position + 1] == ONE
position += 1
return value, position
def _get_date(data, position, as_class, tz_aware, uuid_subtype):
millis = struct.unpack("<q", data[position:position + 8])[0]
diff = millis % 1000
seconds = (millis - diff) / 1000
position += 8
if tz_aware:
dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds)
else:
dt = EPOCH_NAIVE + datetime.timedelta(seconds=seconds)
return dt.replace(microsecond=diff * 1000), position
def _get_code(data, position, as_class, tz_aware, uuid_subtype):
code, position = _get_string(data, position,
as_class, tz_aware, uuid_subtype)
return Code(code), position
def _get_code_w_scope(data, position, as_class, tz_aware, uuid_subtype):
_, position = _get_int(data, position)
code, position = _get_string(data, position,
as_class, tz_aware, uuid_subtype)
scope, position = _get_object(data, position,
as_class, tz_aware, uuid_subtype)
return Code(code, scope), position
def _get_null(data, position, as_class, tz_aware, uuid_subtype):
return None, position
def _get_regex(data, position, as_class, tz_aware, uuid_subtype):
pattern, position = _get_c_string(data, position)
bson_flags, position = _get_c_string(data, position)
flags = 0
if "i" in bson_flags:
flags |= re.IGNORECASE
if "l" in bson_flags:
flags |= re.LOCALE
if "m" in bson_flags:
flags |= re.MULTILINE
if "s" in bson_flags:
flags |= re.DOTALL
if "u" in bson_flags:
flags |= re.UNICODE
if "x" in bson_flags:
flags |= re.VERBOSE
return re.compile(pattern, flags), position
def _get_ref(data, position, as_class, tz_aware, uuid_subtype):
position += 4
collection, position = _get_c_string(data, position)
oid, position = _get_oid(data, position)
return DBRef(collection, oid), position
def _get_timestamp(data, position, as_class, tz_aware, uuid_subtype):
inc, position = _get_int(data, position, unsigned=True)
timestamp, position = _get_int(data, position, unsigned=True)
return Timestamp(timestamp, inc), position
def _get_long(data, position, as_class, tz_aware, uuid_subtype):
# Have to cast to long; on 32-bit unpack may return an int.
# 2to3 will change long to int. That's fine since long doesn't
# exist in python3.
value = long(struct.unpack("<q", data[position:position + 8])[0])
position += 8
return value, position
_element_getter = {
BSONNUM: _get_number,
BSONSTR: _get_string,
BSONOBJ: _get_object,
BSONARR: _get_array,
BSONBIN: _get_binary,
BSONUND: _get_null, # undefined
BSONOID: _get_oid,
BSONBOO: _get_boolean,
BSONDAT: _get_date,
BSONNUL: _get_null,
BSONRGX: _get_regex,
BSONREF: _get_ref,
BSONCOD: _get_code, # code
BSONSYM: _get_string, # symbol
BSONCWS: _get_code_w_scope,
BSONINT: _get_int, # number_int
BSONTIM: _get_timestamp,
BSONLON: _get_long, # Same as _get_int after 2to3 runs.
BSONMIN: lambda v, w, x, y, z: (MinKey(), w),
BSONMAX: lambda v, w, x, y, z: (MaxKey(), w)}
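# Dispatch table keyed by the one-byte BSON element type; _element_to_dict reads the
# type byte and the element name, then delegates to the matching _get_* decoder.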
def _element_to_dict(data, position, as_class, tz_aware, uuid_subtype):
element_type = data[position:position + 1]
position += 1
element_name, position = _get_c_string(data, position)
value, position = _element_getter[element_type](data, position, as_class,
tz_aware, uuid_subtype)
return element_name, value, position
def _elements_to_dict(data, as_class, tz_aware, uuid_subtype):
result = as_class()
position = 0
end = len(data) - 1
while position < end:
(key, value, position) = _element_to_dict(data, position, as_class,
tz_aware, uuid_subtype)
result[key] = value
return result
def _bson_to_dict(data, as_class, tz_aware, uuid_subtype):
obj_size = struct.unpack("<i", data[:4])[0]
length = len(data)
if length < obj_size:
raise InvalidBSON("objsize too large")
if obj_size != length or data[obj_size - 1:obj_size] != ZERO:
raise InvalidBSON("bad eoo")
elements = data[4:obj_size - 1]
return (_elements_to_dict(elements, as_class,
tz_aware, uuid_subtype), data[obj_size:])
if _use_c:
_bson_to_dict = _cbson._bson_to_dict
def _element_to_bson(key, value, check_keys, uuid_subtype):
if not isinstance(key, basestring):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % key)
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % key)
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % key)
name = _make_c_string(key, True)
if isinstance(value, float):
return BSONNUM + name + struct.pack("<d", value)
if _use_uuid:
if isinstance(value, uuid.UUID):
# Java Legacy
if uuid_subtype == JAVA_LEGACY:
# Python 3.0(.1) returns a bytearray instance for bytes (3.1
# and newer just return a bytes instance). Convert that to
# binary_type (here and below) for compatibility.
from_uuid = binary_type(value.bytes)
as_legacy_java = from_uuid[0:8][::-1] + from_uuid[8:16][::-1]
value = Binary(as_legacy_java, subtype=OLD_UUID_SUBTYPE)
# C# legacy
elif uuid_subtype == CSHARP_LEGACY:
# Microsoft GUID representation.
value = Binary(binary_type(value.bytes_le),
subtype=OLD_UUID_SUBTYPE)
# Python
else:
value = Binary(binary_type(value.bytes), subtype=uuid_subtype)
if isinstance(value, Binary):
subtype = value.subtype
if subtype == 2:
value = struct.pack("<i", len(value)) + value
return (BSONBIN + name +
struct.pack("<i", len(value)) + b(chr(subtype)) + value)
if isinstance(value, Code):
cstring = _make_c_string(value)
if not value.scope:
length = struct.pack("<i", len(cstring))
return BSONCOD + name + length + cstring
scope = _dict_to_bson(value.scope, False, uuid_subtype, False)
full_length = struct.pack("<i", 8 + len(cstring) + len(scope))
length = struct.pack("<i", len(cstring))
return BSONCWS + name + full_length + length + cstring + scope
if isinstance(value, binary_type):
if PY3:
# Python3 special case. Store 'bytes' as BSON binary subtype 0.
return (BSONBIN + name +
struct.pack("<i", len(value)) + ZERO + value)
cstring = _make_c_string(value)
length = struct.pack("<i", len(cstring))
return BSONSTR + name + length + cstring
if isinstance(value, unicode):
cstring = _make_c_string(value)
length = struct.pack("<i", len(cstring))
return BSONSTR + name + length + cstring
if isinstance(value, dict):
return BSONOBJ + name + _dict_to_bson(value, check_keys, uuid_subtype, False)
if isinstance(value, (list, tuple)):
as_dict = SON(zip([str(i) for i in range(len(value))], value))
return BSONARR + name + _dict_to_bson(as_dict, check_keys, uuid_subtype, False)
if isinstance(value, ObjectId):
return BSONOID + name + value.binary
if value is True:
return BSONBOO + name + ONE
if value is False:
return BSONBOO + name + ZERO
if isinstance(value, int):
# TODO this is an ugly way to check for this...
if value > MAX_INT64 or value < MIN_INT64:
raise OverflowError("BSON can only handle up to 8-byte ints")
if value > MAX_INT32 or value < MIN_INT32:
return BSONLON + name + struct.pack("<q", value)
return BSONINT + name + struct.pack("<i", value)
# 2to3 will convert long to int here since there is no long in python3.
# That's OK. The previous if block will match instead.
if isinstance(value, long):
if value > MAX_INT64 or value < MIN_INT64:
raise OverflowError("BSON can only handle up to 8-byte ints")
return BSONLON + name + struct.pack("<q", value)
if isinstance(value, datetime.datetime):
if value.utcoffset() is not None:
value = value - value.utcoffset()
millis = int(calendar.timegm(value.timetuple()) * 1000 +
value.microsecond / 1000)
return BSONDAT + name + struct.pack("<q", millis)
if isinstance(value, Timestamp):
time = struct.pack("<I", value.time)
inc = struct.pack("<I", value.inc)
return BSONTIM + name + inc + time
if value is None:
return BSONNUL + name
if isinstance(value, RE_TYPE):
pattern = value.pattern
flags = ""
if value.flags & re.IGNORECASE:
flags += "i"
if value.flags & re.LOCALE:
flags += "l"
if value.flags & re.MULTILINE:
flags += "m"
if value.flags & re.DOTALL:
flags += "s"
if value.flags & re.UNICODE:
flags += "u"
if value.flags & re.VERBOSE:
flags += "x"
return BSONRGX + name + _make_c_string(pattern, True) + \
_make_c_string(flags)
if isinstance(value, DBRef):
return _element_to_bson(key, value.as_doc(), False, uuid_subtype)
if isinstance(value, MinKey):
return BSONMIN + name
if isinstance(value, MaxKey):
return BSONMAX + name
raise InvalidDocument("cannot convert value of type %s to bson" %
type(value))
def _dict_to_bson(dict, check_keys, uuid_subtype, top_level=True):
try:
elements = []
if top_level and "_id" in dict:
elements.append(_element_to_bson("_id", dict["_id"], False, uuid_subtype))
for (key, value) in dict.iteritems():
if not top_level or key != "_id":
elements.append(_element_to_bson(key, value, check_keys, uuid_subtype))
except AttributeError:
raise TypeError("encoder expected a mapping type but got: %r" % dict)
encoded = EMPTY.join(elements)
length = len(encoded) + 5
return struct.pack("<i", length) + encoded + ZERO
if _use_c:
_dict_to_bson = _cbson._dict_to_bson
def decode_all(data, as_class=dict,
tz_aware=True, uuid_subtype=OLD_UUID_SUBTYPE):
"""Decode BSON data to multiple documents.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `as_class` (optional): the class to use for the resulting
documents
- `tz_aware` (optional): if ``True``, return timezone-aware
:class:`~datetime.datetime` instances
.. versionadded:: 1.9
"""
docs = []
position = 0
end = len(data) - 1
while position < end:
obj_size = struct.unpack("<i", data[position:position + 4])[0]
if len(data) - position < obj_size:
raise InvalidBSON("objsize too large")
if data[position + obj_size - 1:position + obj_size] != ZERO:
raise InvalidBSON("bad eoo")
elements = data[position + 4:position + obj_size - 1]
position += obj_size
docs.append(_elements_to_dict(elements, as_class,
tz_aware, uuid_subtype))
return docs
if _use_c:
decode_all = _cbson.decode_all
def is_valid(bson):
"""Check that the given string represents valid :class:`BSON` data.
Raises :class:`TypeError` if `bson` is not an instance of
:class:`str` (:class:`bytes` in python 3). Returns ``True``
if `bson` is valid :class:`BSON`, ``False`` otherwise.
:Parameters:
- `bson`: the data to be validated
"""
if not isinstance(bson, binary_type):
raise TypeError("BSON data must be an instance "
"of a subclass of %s" % (binary_type.__name__,))
try:
(_, remainder) = _bson_to_dict(bson, dict, True, OLD_UUID_SUBTYPE)
return remainder == EMPTY
except:
return False
class BSON(binary_type):
"""BSON (Binary JSON) data.
"""
@classmethod
def encode(cls, document, check_keys=False, uuid_subtype=OLD_UUID_SUBTYPE):
"""Encode a document to a new :class:`BSON` instance.
A document can be any mapping type (like :class:`dict`).
Raises :class:`TypeError` if `document` is not a mapping type,
or contains keys that are not instances of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~bson.errors.InvalidDocument` if `document` cannot be
converted to :class:`BSON`.
:Parameters:
- `document`: mapping type representing a document
- `check_keys` (optional): check if keys start with '$' or
contain '.', raising :class:`~bson.errors.InvalidDocument` in
either case
.. versionadded:: 1.9
"""
return cls(_dict_to_bson(document, check_keys, uuid_subtype))
def decode(self, as_class=dict,
tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE):
"""Decode this BSON data.
The default type to use for the resultant document is
:class:`dict`. Any other class that supports
:meth:`__setitem__` can be used instead by passing it as the
`as_class` parameter.
If `tz_aware` is ``True`` (recommended), any
:class:`~datetime.datetime` instances returned will be
timezone-aware, with their timezone set to
:attr:`bson.tz_util.utc`. Otherwise (default), all
:class:`~datetime.datetime` instances will be naive (but
contain UTC).
:Parameters:
- `as_class` (optional): the class to use for the resulting
document
- `tz_aware` (optional): if ``True``, return timezone-aware
:class:`~datetime.datetime` instances
.. versionadded:: 1.9
"""
(document, _) = _bson_to_dict(self, as_class, tz_aware, uuid_subtype)
return document
def has_c():
"""Is the C extension installed?
.. versionadded:: 1.9
"""
return _use_c
def has_uuid():
"""Is the uuid module available?
.. versionadded:: 2.3
"""
return _use_uuid
|
PYSEC-2013-30
|
test/test_collection.py
|
@@ -30,6 +30,7 @@
from bson.binary import Binary, UUIDLegacy, OLD_UUID_SUBTYPE, UUID_SUBTYPE
from bson.code import Code
+from bson.dbref import DBRef
from bson.objectid import ObjectId
from bson.py3compat import b
from bson.son import SON
@@ -1675,6 +1676,31 @@ def test_bad_encode(self):
self.assertRaises(InvalidDocument, c.save, {"x": c})
warnings.simplefilter("default")
+ def test_bad_dbref(self):
+ c = self.db.test
+ c.drop()
+
+ # Incomplete DBRefs.
+ self.assertRaises(
+ InvalidDocument,
+ c.insert, {'ref': {'$ref': 'collection'}})
+
+ self.assertRaises(
+ InvalidDocument,
+ c.insert, {'ref': {'$id': ObjectId()}})
+
+ ref_only = {'ref': {'$ref': 'collection'}}
+ id_only = {'ref': {'$id': ObjectId()}}
+
+ # Force insert of ref without $id.
+ c.insert(ref_only, check_keys=False)
+ self.assertEqual(DBRef('collection', id=None), c.find_one()['ref'])
+ c.drop()
+
+ # DBRef without $ref is decoded as normal subdocument.
+ c.insert(id_only, check_keys=False)
+ self.assertEqual(id_only, c.find_one())
+
def test_as_class(self):
c = self.db.test
c.drop()
|
# -*- coding: utf-8 -*-
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the collection module."""
import itertools
import re
import sys
import threading
import time
import unittest
import warnings
from nose.plugins.skip import SkipTest
sys.path[0:0] = [""]
from bson.binary import Binary, UUIDLegacy, OLD_UUID_SUBTYPE, UUID_SUBTYPE
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import b
from bson.son import SON
from pymongo import (ASCENDING, DESCENDING, GEO2D,
GEOHAYSTACK, GEOSPHERE, HASHED)
from pymongo.collection import Collection
from pymongo.son_manipulator import SONManipulator
from pymongo.errors import (ConfigurationError,
DuplicateKeyError,
InvalidDocument,
InvalidName,
InvalidOperation,
OperationFailure,
TimeoutError)
from test.test_client import get_client
from test.utils import is_mongos, joinall
from test import (qcheck,
version)
have_uuid = True
try:
import uuid
except ImportError:
have_uuid = False
class TestCollection(unittest.TestCase):
def setUp(self):
self.client = get_client()
self.db = self.client.pymongo_test
def tearDown(self):
self.db.drop_collection("test_large_limit")
self.db = None
self.client = None
def test_collection(self):
self.assertRaises(TypeError, Collection, self.db, 5)
def make_col(base, name):
return base[name]
self.assertRaises(InvalidName, make_col, self.db, "")
self.assertRaises(InvalidName, make_col, self.db, "te$t")
self.assertRaises(InvalidName, make_col, self.db, ".test")
self.assertRaises(InvalidName, make_col, self.db, "test.")
self.assertRaises(InvalidName, make_col, self.db, "tes..t")
self.assertRaises(InvalidName, make_col, self.db.test, "")
self.assertRaises(InvalidName, make_col, self.db.test, "te$t")
self.assertRaises(InvalidName, make_col, self.db.test, ".test")
self.assertRaises(InvalidName, make_col, self.db.test, "test.")
self.assertRaises(InvalidName, make_col, self.db.test, "tes..t")
self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t")
self.assertTrue(isinstance(self.db.test, Collection))
self.assertEqual(self.db.test, self.db["test"])
self.assertEqual(self.db.test, Collection(self.db, "test"))
self.assertEqual(self.db.test.mike, self.db["test.mike"])
self.assertEqual(self.db.test["mike"], self.db["test.mike"])
self.db.drop_collection('test')
self.assertFalse('test' in self.db.collection_names())
# No exception
self.db.drop_collection('test')
def test_create_index(self):
db = self.db
self.assertRaises(TypeError, db.test.create_index, 5)
self.assertRaises(TypeError, db.test.create_index, {"hello": 1})
self.assertRaises(ValueError, db.test.create_index, [])
db.test.drop_indexes()
self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"})
.count(), 1)
db.test.create_index("hello")
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)])
count = 0
for _ in db.system.indexes.find({"ns": u"pymongo_test.test"}):
count += 1
self.assertEqual(count, 3)
db.test.drop_indexes()
ix = db.test.create_index([("hello", DESCENDING),
("world", ASCENDING)], name="hello_world")
self.assertEqual(ix, "hello_world")
db.test.drop_indexes()
self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"})
.count(), 1)
db.test.create_index("hello")
self.assertTrue(u"hello_1" in
[a["name"] for a in db.system.indexes
.find({"ns": u"pymongo_test.test"})])
db.test.drop_indexes()
self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"})
.count(), 1)
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)])
self.assertTrue(u"hello_-1_world_1" in
[a["name"] for a in db.system.indexes
.find({"ns": u"pymongo_test.test"})])
db.test.drop()
db.test.insert({'a': 1})
db.test.insert({'a': 1})
self.assertRaises(DuplicateKeyError, db.test.create_index,
'a', unique=True)
def test_ensure_index(self):
db = self.db
self.assertRaises(TypeError, db.test.ensure_index, {"hello": 1})
db.test.drop_indexes()
self.assertEqual("hello_1", db.test.create_index("hello"))
self.assertEqual("hello_1", db.test.create_index("hello"))
self.assertEqual("goodbye_1",
db.test.ensure_index("goodbye"))
self.assertEqual(None, db.test.ensure_index("goodbye"))
db.test.drop_indexes()
self.assertEqual("foo",
db.test.ensure_index("goodbye", name="foo"))
self.assertEqual(None, db.test.ensure_index("goodbye", name="foo"))
db.test.drop_indexes()
self.assertEqual("goodbye_1",
db.test.ensure_index("goodbye"))
self.assertEqual(None, db.test.ensure_index("goodbye"))
db.test.drop_index("goodbye_1")
self.assertEqual("goodbye_1",
db.test.ensure_index("goodbye"))
self.assertEqual(None, db.test.ensure_index("goodbye"))
db.drop_collection("test")
self.assertEqual("goodbye_1",
db.test.ensure_index("goodbye"))
self.assertEqual(None, db.test.ensure_index("goodbye"))
db.test.drop_index("goodbye_1")
self.assertEqual("goodbye_1",
db.test.create_index("goodbye"))
self.assertEqual(None, db.test.ensure_index("goodbye"))
db.test.drop_index("goodbye_1")
self.assertEqual("goodbye_1",
db.test.ensure_index("goodbye", cache_for=1))
time.sleep(1.2)
self.assertEqual("goodbye_1",
db.test.ensure_index("goodbye"))
db.test.drop_index("goodbye_1")
self.assertEqual("goodbye_1",
db.test.create_index("goodbye", cache_for=1))
time.sleep(1.2)
self.assertEqual("goodbye_1",
db.test.ensure_index("goodbye"))
# Make sure the expiration time is updated.
self.assertEqual(None,
db.test.ensure_index("goodbye"))
# Clean up indexes for later tests
db.test.drop_indexes()
def test_deprecated_ttl_index_kwarg(self):
db = self.db
# In Python 2.6+ we could use the catch_warnings context
# manager to test this warning nicely. As we can't do that
# we must test raising errors before the ignore filter is applied.
warnings.simplefilter("error", DeprecationWarning)
self.assertRaises(DeprecationWarning, lambda:
db.test.ensure_index("goodbye", ttl=10))
warnings.resetwarnings()
warnings.simplefilter("ignore")
self.assertEqual("goodbye_1",
db.test.ensure_index("goodbye", ttl=10))
self.assertEqual(None, db.test.ensure_index("goodbye"))
def test_ensure_unique_index_threaded(self):
coll = self.db.test_unique_threaded
coll.drop()
coll.insert(({'foo': i} for i in xrange(10000)))
class Indexer(threading.Thread):
def run(self):
try:
coll.ensure_index('foo', unique=True)
coll.insert({'foo': 'bar'})
coll.insert({'foo': 'bar'})
except OperationFailure:
pass
threads = []
for _ in xrange(10):
t = Indexer()
t.setDaemon(True)
threads.append(t)
for i in xrange(10):
threads[i].start()
joinall(threads)
self.assertEqual(10001, coll.count())
coll.drop()
def test_index_on_binary(self):
db = self.db
db.drop_collection("test")
db.test.save({"bin": Binary(b("def"))})
db.test.save({"bin": Binary(b("abc"))})
db.test.save({"bin": Binary(b("ghi"))})
self.assertEqual(db.test.find({"bin": Binary(b("abc"))})
.explain()["nscanned"], 3)
db.test.create_index("bin")
self.assertEqual(db.test.find({"bin": Binary(b("abc"))})
.explain()["nscanned"], 1)
def test_drop_index(self):
db = self.db
db.test.drop_indexes()
db.test.create_index("hello")
name = db.test.create_index("goodbye")
self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"})
.count(), 3)
self.assertEqual(name, "goodbye_1")
db.test.drop_index(name)
self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"})
.count(), 2)
self.assertTrue(u"hello_1" in
[a["name"] for a in db.system.indexes
.find({"ns": u"pymongo_test.test"})])
db.test.drop_indexes()
db.test.create_index("hello")
name = db.test.create_index("goodbye")
self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"})
.count(), 3)
self.assertEqual(name, "goodbye_1")
db.test.drop_index([("goodbye", ASCENDING)])
self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"})
.count(), 2)
self.assertTrue(u"hello_1" in
[a["name"] for a in db.system.indexes
.find({"ns": u"pymongo_test.test"})])
def test_reindex(self):
db = self.db
db.drop_collection("test")
db.test.insert({"foo": "bar", "who": "what", "when": "how"})
db.test.create_index("foo")
db.test.create_index("who")
db.test.create_index("when")
info = db.test.index_information()
def check_result(result):
self.assertEqual(4, result['nIndexes'])
self.assertEqual(4, result['nIndexesWas'])
indexes = result['indexes']
names = [idx['name'] for idx in indexes]
for name in names:
self.assertTrue(name in info)
for key in info:
self.assertTrue(key in names)
reindexed = db.test.reindex()
if 'raw' in reindexed:
# mongos
for result in reindexed['raw'].itervalues():
check_result(result)
else:
check_result(reindexed)
def test_index_info(self):
db = self.db
db.test.drop_indexes()
db.test.remove({})
db.test.save({}) # create collection
self.assertEqual(len(db.test.index_information()), 1)
self.assertTrue("_id_" in db.test.index_information())
db.test.create_index("hello")
self.assertEqual(len(db.test.index_information()), 2)
self.assertEqual(db.test.index_information()["hello_1"]["key"],
[("hello", ASCENDING)])
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)],
unique=True)
self.assertEqual(db.test.index_information()["hello_1"]["key"],
[("hello", ASCENDING)])
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)],
db.test.index_information()["hello_-1_world_1"]["key"]
)
self.assertEqual(True,
db.test.index_information()["hello_-1_world_1"]["unique"])
def test_index_geo2d(self):
db = self.db
db.test.drop_indexes()
self.assertEqual('loc_2d', db.test.create_index([("loc", GEO2D)]))
index_info = db.test.index_information()['loc_2d']
self.assertEqual([('loc', '2d')], index_info['key'])
def test_index_haystack(self):
if is_mongos(self.db.connection):
raise SkipTest("geoSearch is not supported by mongos")
db = self.db
db.test.drop_indexes()
db.test.remove()
_id = db.test.insert({
"pos": {"long": 34.2, "lat": 33.3},
"type": "restaurant"
})
db.test.insert({
"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"
})
db.test.insert({
"pos": {"long": 59.1, "lat": 87.2}, "type": "office"
})
db.test.create_index(
[("pos", GEOHAYSTACK), ("type", ASCENDING)],
bucket_size=1
)
results = db.command(SON([
("geoSearch", "test"),
("near", [33, 33]),
("maxDistance", 6),
("search", {"type": "restaurant"}),
("limit", 30),
]))['results']
self.assertEqual(2, len(results))
self.assertEqual({
"_id": _id,
"pos": {"long": 34.2, "lat": 33.3},
"type": "restaurant"
}, results[0])
def test_index_text(self):
if not version.at_least(self.client, (2, 3, 2)):
raise SkipTest("Text search requires server >=2.3.2.")
if is_mongos(self.client):
raise SkipTest("setParameter does not work through mongos")
self.client.admin.command('setParameter', '*',
textSearchEnabled=True)
db = self.db
db.test.drop_indexes()
self.assertEqual("t_text", db.test.create_index([("t", "text")]))
index_info = db.test.index_information()["t_text"]
self.assertTrue("weights" in index_info)
db.test.drop_indexes()
self.client.admin.command('setParameter', '*',
textSearchEnabled=False)
def test_index_2dsphere(self):
if not version.at_least(self.client, (2, 3, 2)):
raise SkipTest("2dsphere indexing requires server >=2.3.2.")
db = self.db
db.test.drop_indexes()
self.assertEqual("geo_2dsphere",
db.test.create_index([("geo", GEOSPHERE)]))
poly = {"type": "Polygon",
"coordinates": [[[40,5], [40,6], [41,6], [41,5], [40,5]]]}
query = {"geo": {"$within": {"$geometry": poly}}}
self.assertTrue(
db.test.find(query).explain()['cursor'].startswith('S2Cursor'))
db.test.drop_indexes()
def test_index_hashed(self):
if not version.at_least(self.client, (2, 3, 2)):
raise SkipTest("hashed indexing requires server >=2.3.2.")
db = self.db
db.test.drop_indexes()
self.assertEqual("a_hashed",
db.test.create_index([("a", HASHED)]))
self.assertEqual("BtreeCursor a_hashed",
db.test.find({'a': 1}).explain()['cursor'])
db.test.drop_indexes()
def test_index_sparse(self):
db = self.db
db.test.drop_indexes()
db.test.create_index([('key', ASCENDING)], sparse=True)
self.assertTrue(db.test.index_information()['key_1']['sparse'])
def test_index_background(self):
db = self.db
db.test.drop_indexes()
db.test.create_index([('keya', ASCENDING)])
db.test.create_index([('keyb', ASCENDING)], background=False)
db.test.create_index([('keyc', ASCENDING)], background=True)
self.assertFalse('background' in db.test.index_information()['keya_1'])
self.assertFalse(db.test.index_information()['keyb_1']['background'])
self.assertTrue(db.test.index_information()['keyc_1']['background'])
def _drop_dups_setup(self, db):
db.drop_collection('test')
db.test.insert({'i': 1})
db.test.insert({'i': 2})
db.test.insert({'i': 2}) # duplicate
db.test.insert({'i': 3})
def test_index_drop_dups(self):
# Try dropping duplicates
db = self.db
self._drop_dups_setup(db)
if version.at_least(db.connection, (1, 9, 2)):
# No error, just drop the duplicate
db.test.create_index(
[('i', ASCENDING)],
unique=True,
drop_dups=True
)
else:
# https://jira.mongodb.org/browse/SERVER-2054 "Creating an index
# with dropDups shouldn't assert". On Mongo < 1.9.2, the duplicate
# is dropped & the index created, but an error is thrown.
def test_create():
db.test.create_index(
[('i', ASCENDING)],
unique=True,
drop_dups=True
)
self.assertRaises(DuplicateKeyError, test_create)
# Duplicate was dropped
self.assertEqual(3, db.test.count())
# Index was created, plus the index on _id
self.assertEqual(2, len(db.test.index_information()))
def test_index_dont_drop_dups(self):
# Try *not* dropping duplicates
db = self.db
self._drop_dups_setup(db)
# There's a duplicate
def test_create():
db.test.create_index(
[('i', ASCENDING)],
unique=True,
drop_dups=False
)
self.assertRaises(DuplicateKeyError, test_create)
# Duplicate wasn't dropped
self.assertEqual(4, db.test.count())
# Index wasn't created, only the default index on _id
self.assertEqual(1, len(db.test.index_information()))
def test_field_selection(self):
db = self.db
db.drop_collection("test")
doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}}
db.test.insert(doc)
# Test field inclusion
doc = db.test.find({}, ["_id"]).next()
self.assertEqual(doc.keys(), ["_id"])
doc = db.test.find({}, ["a"]).next()
l = doc.keys()
l.sort()
self.assertEqual(l, ["_id", "a"])
doc = db.test.find({}, ["b"]).next()
l = doc.keys()
l.sort()
self.assertEqual(l, ["_id", "b"])
doc = db.test.find({}, ["c"]).next()
l = doc.keys()
l.sort()
self.assertEqual(l, ["_id", "c"])
doc = db.test.find({}, ["a"]).next()
self.assertEqual(doc["a"], 1)
doc = db.test.find({}, ["b"]).next()
self.assertEqual(doc["b"], 5)
doc = db.test.find({}, ["c"]).next()
self.assertEqual(doc["c"], {"d": 5, "e": 10})
# Test inclusion of fields with dots
doc = db.test.find({}, ["c.d"]).next()
self.assertEqual(doc["c"], {"d": 5})
doc = db.test.find({}, ["c.e"]).next()
self.assertEqual(doc["c"], {"e": 10})
doc = db.test.find({}, ["b", "c.e"]).next()
self.assertEqual(doc["c"], {"e": 10})
doc = db.test.find({}, ["b", "c.e"]).next()
l = doc.keys()
l.sort()
self.assertEqual(l, ["_id", "b", "c"])
doc = db.test.find({}, ["b", "c.e"]).next()
self.assertEqual(doc["b"], 5)
# Test field exclusion
doc = db.test.find({}, {"a": False, "b": 0}).next()
l = doc.keys()
l.sort()
self.assertEqual(l, ["_id", "c"])
doc = db.test.find({}, {"_id": False}).next()
l = doc.keys()
self.assertFalse("_id" in l)
def test_options(self):
db = self.db
db.drop_collection("test")
db.test.save({})
self.assertEqual(db.test.options(), {})
self.assertEqual(db.test.doesnotexist.options(), {})
db.drop_collection("test")
if version.at_least(db.connection, (1, 9)):
db.create_collection("test", capped=True, size=1000)
self.assertEqual(db.test.options(), {"capped": True, 'size': 1000})
else:
db.create_collection("test", capped=True)
self.assertEqual(db.test.options(), {"capped": True})
db.drop_collection("test")
def test_insert_find_one(self):
db = self.db
db.test.remove({})
self.assertEqual(0, len(list(db.test.find())))
doc = {"hello": u"world"}
id = db.test.insert(doc)
self.assertEqual(1, len(list(db.test.find())))
self.assertEqual(doc, db.test.find_one())
self.assertEqual(doc["_id"], id)
self.assertTrue(isinstance(id, ObjectId))
doc_class = None
# Work around http://bugs.jython.org/issue1728
if (sys.platform.startswith('java') and
sys.version_info[:3] >= (2, 5, 2)):
doc_class = SON
def remove_insert_find_one(doc):
db.test.remove({})
db.test.insert(doc)
# SON equality is order sensitive.
return db.test.find_one(as_class=doc_class) == doc.to_dict()
qcheck.check_unittest(self, remove_insert_find_one,
qcheck.gen_mongo_dict(3))
def test_generator_insert(self):
db = self.db
db.test.remove({})
self.assertEqual(db.test.find().count(), 0)
db.test.insert(({'a': i} for i in xrange(5)), manipulate=False)
self.assertEqual(5, db.test.count())
db.test.remove({})
def test_remove_all(self):
self.db.test.remove()
self.assertEqual(0, self.db.test.count())
self.db.test.insert({"x": 1})
self.db.test.insert({"y": 1})
self.assertEqual(2, self.db.test.count())
self.db.test.remove()
self.assertEqual(0, self.db.test.count())
def test_find_w_fields(self):
db = self.db
db.test.remove({})
db.test.insert({"x": 1, "mike": "awesome",
"extra thing": "abcdefghijklmnopqrstuvwxyz"})
self.assertEqual(1, db.test.count())
doc = db.test.find({}).next()
self.assertTrue("x" in doc)
doc = db.test.find({}).next()
self.assertTrue("mike" in doc)
doc = db.test.find({}).next()
self.assertTrue("extra thing" in doc)
doc = db.test.find({}, ["x", "mike"]).next()
self.assertTrue("x" in doc)
doc = db.test.find({}, ["x", "mike"]).next()
self.assertTrue("mike" in doc)
doc = db.test.find({}, ["x", "mike"]).next()
self.assertFalse("extra thing" in doc)
doc = db.test.find({}, ["mike"]).next()
self.assertFalse("x" in doc)
doc = db.test.find({}, ["mike"]).next()
self.assertTrue("mike" in doc)
doc = db.test.find({}, ["mike"]).next()
self.assertFalse("extra thing" in doc)
def test_fields_specifier_as_dict(self):
db = self.db
db.test.remove({})
db.test.insert({"x": [1, 2, 3], "mike": "awesome"})
self.assertEqual([1, 2, 3], db.test.find_one()["x"])
if version.at_least(db.connection, (1, 5, 1)):
self.assertEqual([2, 3],
db.test.find_one(fields={"x": {"$slice":
-2}})["x"])
self.assertTrue("x" not in db.test.find_one(fields={"x": 0}))
self.assertTrue("mike" in db.test.find_one(fields={"x": 0}))
def test_find_w_regex(self):
db = self.db
db.test.remove({})
db.test.insert({"x": "hello_world"})
db.test.insert({"x": "hello_mike"})
db.test.insert({"x": "hello_mikey"})
db.test.insert({"x": "hello_test"})
self.assertEqual(db.test.find().count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("^hello.*")}).count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("ello")}).count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("^hello$")}).count(), 0)
self.assertEqual(db.test.find({"x":
re.compile("^hello_mi.*$")}).count(), 2)
def test_id_can_be_anything(self):
db = self.db
db.test.remove({})
auto_id = {"hello": "world"}
db.test.insert(auto_id)
self.assertTrue(isinstance(auto_id["_id"], ObjectId))
numeric = {"_id": 240, "hello": "world"}
db.test.insert(numeric)
self.assertEqual(numeric["_id"], 240)
object = {"_id": numeric, "hello": "world"}
db.test.insert(object)
self.assertEqual(object["_id"], numeric)
for x in db.test.find():
self.assertEqual(x["hello"], u"world")
self.assertTrue("_id" in x)
def test_iteration(self):
db = self.db
def iterate():
[a for a in db.test]
self.assertRaises(TypeError, iterate)
def test_invalid_key_names(self):
db = self.db
db.test.drop()
db.test.insert({"hello": "world"})
db.test.insert({"hello": {"hello": "world"}})
self.assertRaises(InvalidDocument, db.test.insert, {"$hello": "world"})
self.assertRaises(InvalidDocument, db.test.insert,
{"hello": {"$hello": "world"}})
db.test.insert({"he$llo": "world"})
db.test.insert({"hello": {"hello$": "world"}})
self.assertRaises(InvalidDocument, db.test.insert,
{".hello": "world"})
self.assertRaises(InvalidDocument, db.test.insert,
{"hello": {".hello": "world"}})
self.assertRaises(InvalidDocument, db.test.insert,
{"hello.": "world"})
self.assertRaises(InvalidDocument, db.test.insert,
{"hello": {"hello.": "world"}})
self.assertRaises(InvalidDocument, db.test.insert,
{"hel.lo": "world"})
self.assertRaises(InvalidDocument, db.test.insert,
{"hello": {"hel.lo": "world"}})
def test_insert_multiple(self):
db = self.db
db.drop_collection("test")
doc1 = {"hello": u"world"}
doc2 = {"hello": u"mike"}
self.assertEqual(db.test.find().count(), 0)
ids = db.test.insert([doc1, doc2])
self.assertEqual(db.test.find().count(), 2)
self.assertEqual(doc1, db.test.find_one({"hello": u"world"}))
self.assertEqual(doc2, db.test.find_one({"hello": u"mike"}))
self.assertEqual(2, len(ids))
self.assertEqual(doc1["_id"], ids[0])
self.assertEqual(doc2["_id"], ids[1])
id = db.test.insert([{"hello": 1}])
self.assertTrue(isinstance(id, list))
self.assertEqual(1, len(id))
self.assertRaises(InvalidOperation, db.test.insert, [])
def test_insert_multiple_with_duplicate(self):
db = self.db
db.drop_collection("test")
db.test.ensure_index([('i', ASCENDING)], unique=True)
# No error
db.test.insert([{'i': i} for i in range(5, 10)], w=0)
db.test.remove()
# No error
db.test.insert([{'i': 1}] * 2, w=0)
self.assertEqual(1, db.test.count())
self.assertRaises(
DuplicateKeyError,
lambda: db.test.insert([{'i': 2}] * 2),
)
db.drop_collection("test")
db.write_concern['w'] = 0
db.test.ensure_index([('i', ASCENDING)], unique=True)
# No error
db.test.insert([{'i': 1}] * 2)
self.assertEqual(1, db.test.count())
# Implied safe
self.assertRaises(
DuplicateKeyError,
lambda: db.test.insert([{'i': 2}] * 2, j=True),
)
# Explicit safe
self.assertRaises(
DuplicateKeyError,
lambda: db.test.insert([{'i': 2}] * 2, w=1),
)
# Misconfigured value for safe
self.assertRaises(
TypeError,
lambda: db.test.insert([{'i': 2}] * 2, safe=1),
)
def test_insert_iterables(self):
db = self.db
self.assertRaises(TypeError, db.test.insert, 4)
self.assertRaises(TypeError, db.test.insert, None)
self.assertRaises(TypeError, db.test.insert, True)
db.drop_collection("test")
self.assertEqual(db.test.find().count(), 0)
ids = db.test.insert(({"hello": u"world"}, {"hello": u"world"}))
self.assertEqual(db.test.find().count(), 2)
db.drop_collection("test")
self.assertEqual(db.test.find().count(), 0)
ids = db.test.insert(itertools.imap(lambda x: {"hello": "world"},
itertools.repeat(None, 10)))
self.assertEqual(db.test.find().count(), 10)
def test_save(self):
self.db.drop_collection("test")
# Save a doc with autogenerated id
id = self.db.test.save({"hello": "world"})
self.assertEqual(self.db.test.find_one()["_id"], id)
self.assertTrue(isinstance(id, ObjectId))
# Save a doc with explicit id
self.db.test.save({"_id": "explicit_id", "hello": "bar"})
doc = self.db.test.find_one({"_id": "explicit_id"})
self.assertEqual(doc['_id'], 'explicit_id')
self.assertEqual(doc['hello'], 'bar')
# Save docs with _id field already present (shouldn't create new docs)
self.assertEqual(2, self.db.test.count())
self.db.test.save({'_id': id, 'hello': 'world'})
self.assertEqual(2, self.db.test.count())
self.db.test.save({'_id': 'explicit_id', 'hello': 'baz'})
self.assertEqual(2, self.db.test.count())
self.assertEqual(
'baz',
self.db.test.find_one({'_id': 'explicit_id'})['hello']
)
# Safe mode
self.db.test.create_index("hello", unique=True)
# No exception, even though we duplicate the first doc's "hello" value
self.db.test.save({'_id': 'explicit_id', 'hello': 'world'}, w=0)
self.assertRaises(
DuplicateKeyError,
self.db.test.save,
{'_id': 'explicit_id', 'hello': 'world'})
def test_save_with_invalid_key(self):
self.db.drop_collection("test")
self.assertTrue(self.db.test.insert({"hello": "world"}))
doc = self.db.test.find_one()
doc['a.b'] = 'c'
self.assertRaises(InvalidDocument, self.db.test.save, doc)
def test_unique_index(self):
db = self.db
db.drop_collection("test")
db.test.create_index("hello")
db.test.save({"hello": "world"})
db.test.save({"hello": "mike"})
db.test.save({"hello": "world"})
self.assertFalse(db.error())
db.drop_collection("test")
db.test.create_index("hello", unique=True)
db.test.save({"hello": "world"})
db.test.save({"hello": "mike"})
db.test.save({"hello": "world"}, w=0)
self.assertTrue(db.error())
def test_duplicate_key_error(self):
db = self.db
db.drop_collection("test")
db.test.create_index("x", unique=True)
db.test.insert({"_id": 1, "x": 1})
db.test.insert({"_id": 2, "x": 2})
# No error
db.test.insert({"_id": 1, "x": 1}, safe=False)
db.test.save({"_id": 1, "x": 1}, safe=False)
db.test.insert({"_id": 2, "x": 2}, safe=False)
db.test.save({"_id": 2, "x": 2}, safe=False)
db.test.insert({"_id": 1, "x": 1}, w=0)
db.test.save({"_id": 1, "x": 1}, w=0)
db.test.insert({"_id": 2, "x": 2}, w=0)
db.test.save({"_id": 2, "x": 2}, w=0)
# But all those statements didn't do anything
self.assertEqual(2, db.test.count())
expected_error = OperationFailure
if version.at_least(db.connection, (1, 3)):
expected_error = DuplicateKeyError
self.assertRaises(expected_error,
db.test.insert, {"_id": 1})
self.assertRaises(expected_error,
db.test.insert, {"x": 1})
self.assertRaises(expected_error,
db.test.save, {"x": 2})
self.assertRaises(expected_error,
db.test.update, {"x": 1},
{"$inc": {"x": 1}})
def test_continue_on_error(self):
db = self.db
if not version.at_least(db.connection, (1, 9, 1)):
raise SkipTest("continue_on_error requires MongoDB >= 1.9.1")
db.drop_collection("test")
oid = db.test.insert({"one": 1})
self.assertEqual(1, db.test.count())
docs = []
docs.append({"_id": oid, "two": 2})
docs.append({"three": 3})
docs.append({"four": 4})
docs.append({"five": 5})
db.test.insert(docs, manipulate=False, w=0)
self.assertEqual(11000, db.error()['code'])
self.assertEqual(1, db.test.count())
db.test.insert(docs, manipulate=False, continue_on_error=True, w=0)
self.assertEqual(11000, db.error()['code'])
self.assertEqual(4, db.test.count())
db.drop_collection("test")
oid = db.test.insert({"_id": oid, "one": 1}, w=0)
self.assertEqual(1, db.test.count())
docs[0].pop("_id")
docs[2]["_id"] = oid
db.test.insert(docs, manipulate=False, w=0)
self.assertEqual(11000, db.error()['code'])
self.assertEqual(3, db.test.count())
db.test.insert(docs, manipulate=False, continue_on_error=True, w=0)
self.assertEqual(11000, db.error()['code'])
self.assertEqual(6, db.test.count())
def test_error_code(self):
try:
self.db.test.update({}, {"$thismodifierdoesntexist": 1})
self.fail()
except OperationFailure, e:
if version.at_least(self.db.connection, (1, 3)):
self.assertEqual(10147, e.code)
def test_index_on_subfield(self):
db = self.db
db.drop_collection("test")
db.test.insert({"hello": {"a": 4, "b": 5}})
db.test.insert({"hello": {"a": 7, "b": 2}})
db.test.insert({"hello": {"a": 4, "b": 10}})
db.drop_collection("test")
db.test.create_index("hello.a", unique=True)
db.test.insert({"hello": {"a": 4, "b": 5}})
db.test.insert({"hello": {"a": 7, "b": 2}})
self.assertRaises(DuplicateKeyError,
db.test.insert, {"hello": {"a": 4, "b": 10}})
def test_safe_insert(self):
db = self.db
db.drop_collection("test")
a = {"hello": "world"}
db.test.insert(a)
db.test.insert(a, w=0)
self.assertTrue("E11000" in db.error()["err"])
self.assertRaises(OperationFailure, db.test.insert, a)
def test_update(self):
db = self.db
db.drop_collection("test")
id1 = db.test.save({"x": 5})
db.test.update({}, {"$inc": {"x": 1}})
self.assertEqual(db.test.find_one(id1)["x"], 6)
id2 = db.test.save({"x": 1})
db.test.update({"x": 6}, {"$inc": {"x": 1}})
self.assertEqual(db.test.find_one(id1)["x"], 7)
self.assertEqual(db.test.find_one(id2)["x"], 1)
def test_multi_update(self):
db = self.db
if not version.at_least(db.connection, (1, 1, 3, -1)):
raise SkipTest("multi-update requires MongoDB >= 1.1.3")
db.drop_collection("test")
db.test.save({"x": 4, "y": 3})
db.test.save({"x": 5, "y": 5})
db.test.save({"x": 4, "y": 4})
db.test.update({"x": 4}, {"$set": {"y": 5}}, multi=True)
self.assertEqual(3, db.test.count())
for doc in db.test.find():
self.assertEqual(5, doc["y"])
self.assertEqual(2, db.test.update({"x": 4}, {"$set": {"y": 6}},
multi=True)["n"])
def test_upsert(self):
db = self.db
db.drop_collection("test")
db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True)
db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True)
self.assertEqual(1, db.test.count())
self.assertEqual(2, db.test.find_one()["count"])
def test_safe_update(self):
db = self.db
v113minus = version.at_least(db.connection, (1, 1, 3, -1))
v19 = version.at_least(db.connection, (1, 9))
db.drop_collection("test")
db.test.create_index("x", unique=True)
db.test.insert({"x": 5})
id = db.test.insert({"x": 4})
self.assertEqual(
None, db.test.update({"_id": id}, {"$inc": {"x": 1}}, w=0))
if v19:
self.assertTrue("E11000" in db.error()["err"])
elif v113minus:
self.assertTrue(db.error()["err"].startswith("E11001"))
else:
self.assertTrue(db.error()["err"].startswith("E12011"))
self.assertRaises(OperationFailure, db.test.update,
{"_id": id}, {"$inc": {"x": 1}})
self.assertEqual(1, db.test.update({"_id": id},
{"$inc": {"x": 2}})["n"])
self.assertEqual(0, db.test.update({"_id": "foo"},
{"$inc": {"x": 2}})["n"])
def test_update_with_invalid_keys(self):
self.db.drop_collection("test")
self.assertTrue(self.db.test.insert({"hello": "world"}))
doc = self.db.test.find_one()
doc['a.b'] = 'c'
# Replace
self.assertRaises(InvalidDocument,
self.db.test.update, {"hello": "world"}, doc)
# Upsert
self.assertRaises(InvalidDocument,
self.db.test.update, {"foo": "bar"}, doc, upsert=True)
# Check that the last two ops didn't actually modify anything
self.assertTrue('a.b' not in self.db.test.find_one())
# Modify shouldn't check keys...
self.assertTrue(self.db.test.update({"hello": "world"},
{"$set": {"foo.bar": "baz"}},
upsert=True))
# I know this seems like testing the server but I'd like to be notified
# by CI if the server's behavior changes here.
doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")])
self.assertRaises(OperationFailure, self.db.test.update,
{"hello": "world"}, doc, upsert=True)
# This is going to cause keys to be checked and raise InvalidDocument.
# That's OK assuming the server's behavior in the previous assert
# doesn't change. If the behavior changes checking the first key for
# '$' in update won't be good enough anymore.
doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})])
self.assertRaises(InvalidDocument, self.db.test.update,
{"hello": "world"}, doc, upsert=True)
# Replace with empty document
self.assertNotEqual(0, self.db.test.update({"hello": "world"},
{})['n'])
def test_safe_save(self):
db = self.db
db.drop_collection("test")
db.test.create_index("hello", unique=True)
db.test.save({"hello": "world"})
db.test.save({"hello": "world"}, w=0)
self.assertTrue("E11000" in db.error()["err"])
self.assertRaises(OperationFailure, db.test.save,
{"hello": "world"})
def test_safe_remove(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000)
db.test.insert({"x": 1})
self.assertEqual(1, db.test.count())
self.assertEqual(None, db.test.remove({"x": 1}, w=0))
self.assertEqual(1, db.test.count())
if version.at_least(db.connection, (1, 1, 3, -1)):
self.assertRaises(OperationFailure, db.test.remove,
{"x": 1})
else: # Just test that it doesn't blow up
db.test.remove({"x": 1})
db.drop_collection("test")
db.test.insert({"x": 1})
db.test.insert({"x": 1})
self.assertEqual(2, db.test.remove({})["n"])
self.assertEqual(0, db.test.remove({})["n"])
def test_last_error_options(self):
if not version.at_least(self.client, (1, 5, 1)):
raise SkipTest("getLastError options require MongoDB >= 1.5.1")
# XXX: Fix this if we ever have a replica set unittest env.
# mongo >=1.7.6 errors with 'norepl' when w=2+
# and we aren't replicated.
if not version.at_least(self.client, (1, 7, 6)):
self.assertRaises(TimeoutError, self.db.test.save,
{"x": 1}, w=2, wtimeout=1)
self.assertRaises(TimeoutError, self.db.test.insert,
{"x": 1}, w=2, wtimeout=1)
self.assertRaises(TimeoutError, self.db.test.update,
{"x": 1}, {"y": 2}, w=2, wtimeout=1)
self.assertRaises(TimeoutError, self.db.test.remove,
{"x": 1}, w=2, wtimeout=1)
self.db.test.save({"x": 1}, w=1, wtimeout=1)
self.db.test.insert({"x": 1}, w=1, wtimeout=1)
self.db.test.remove({"x": 1}, w=1, wtimeout=1)
self.db.test.update({"x": 1}, {"y": 2}, w=1, wtimeout=1)
def test_manual_last_error(self):
self.db.test.save({"x": 1}, w=0)
# XXX: Fix this if we ever have a replica set unittest env.
# mongo >=1.7.6 errors with 'norepl' when w=2+
# and we aren't replicated
if not version.at_least(self.client, (1, 7, 6)):
self.assertRaises(TimeoutError, self.db.command,
"getlasterror", w=2, wtimeout=1)
self.db.command("getlasterror", w=1, wtimeout=1)
def test_count(self):
db = self.db
db.drop_collection("test")
self.assertEqual(db.test.count(), 0)
db.test.save({})
db.test.save({})
self.assertEqual(db.test.count(), 2)
db.test.save({'foo': 'bar'})
db.test.save({'foo': 'baz'})
self.assertEqual(db.test.find({'foo': 'bar'}).count(), 1)
self.assertEqual(db.test.find({'foo': re.compile(r'ba.*')}).count(), 2)
def test_aggregate(self):
if not version.at_least(self.db.connection, (2, 1, 0)):
raise SkipTest("The aggregate command requires MongoDB >= 2.1.0")
db = self.db
db.drop_collection("test")
db.test.save({'foo': [1, 2]})
self.assertRaises(TypeError, db.test.aggregate, "wow")
pipeline = {"$project": {"_id": False, "foo": True}}
expected = {'ok': 1.0, 'result': [{'foo': [1, 2]}]}
self.assertEqual(expected, db.test.aggregate(pipeline))
self.assertEqual(expected, db.test.aggregate([pipeline]))
self.assertEqual(expected, db.test.aggregate((pipeline,)))
def test_group(self):
db = self.db
db.drop_collection("test")
def group_checker(args, expected):
eval = db.test.group(*args)
self.assertEqual(eval, expected)
self.assertEqual([],
db.test.group([], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
db.test.save({"a": 2})
db.test.save({"b": 5})
db.test.save({"a": 1})
self.assertEqual([{"count": 3}],
db.test.group([], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
self.assertEqual([{"count": 1}],
db.test.group([], {"a": {"$gt": 1}}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
db.test.save({"a": 2, "b": 3})
self.assertEqual([{"a": 2, "count": 2},
{"a": None, "count": 1},
{"a": 1, "count": 1}],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
# modifying finalize
self.assertEqual([{"a": 2, "count": 3},
{"a": None, "count": 2},
{"a": 1, "count": 2}],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { obj.count++; }"))
# returning finalize
self.assertEqual([2, 1, 1],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { return obj.count; }"))
# keyf
self.assertEqual([2, 2],
db.test.group("function (obj) { if (obj.a == 2) "
"{ return {a: true} }; "
"return {b: true}; }", {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { return obj.count; }"))
# no key
self.assertEqual([{"count": 4}],
db.test.group(None, {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
self.assertRaises(OperationFailure, db.test.group,
[], {}, {}, "5 ++ 5")
def test_group_with_scope(self):
db = self.db
db.drop_collection("test")
db.test.save({"a": 1})
db.test.save({"b": 1})
reduce_function = "function (obj, prev) { prev.count += inc_value; }"
self.assertEqual(2, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 1}))[0]['count'])
self.assertEqual(4, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 2}))[0]['count'])
self.assertEqual(1,
db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 0.5}))[0]['count'])
if version.at_least(db.connection, (1, 1)):
self.assertEqual(2, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 1}),
)[0]['count'])
self.assertEqual(4, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 2}),
)[0]['count'])
self.assertEqual(1, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 0.5}),
)[0]['count'])
def test_large_limit(self):
db = self.db
db.drop_collection("test_large_limit")
db.test_large_limit.create_index([('x', 1)])
for i in range(2000):
doc = {"x": i, "y": "mongomongo" * 1000}
db.test_large_limit.insert(doc)
# Wait for insert to complete; often mysteriously failing in Jenkins
st = time.time()
while (
len(list(db.test_large_limit.find())) < 2000
and time.time() - st < 30
):
time.sleep(1)
self.assertEqual(2000, len(list(db.test_large_limit.find())))
i = 0
y = 0
for doc in db.test_large_limit.find(limit=1900).sort([('x', 1)]):
i += 1
y += doc["x"]
self.assertEqual(1900, i)
self.assertEqual((1900 * 1899) / 2, y)
def test_find_kwargs(self):
db = self.db
db.drop_collection("test")
for i in range(10):
db.test.insert({"x": i})
self.assertEqual(10, db.test.count())
sum = 0
for x in db.test.find({}, skip=4, limit=2):
sum += x["x"]
self.assertEqual(9, sum)
def test_rename(self):
db = self.db
db.drop_collection("test")
db.drop_collection("foo")
self.assertRaises(TypeError, db.test.rename, 5)
self.assertRaises(InvalidName, db.test.rename, "")
self.assertRaises(InvalidName, db.test.rename, "te$t")
self.assertRaises(InvalidName, db.test.rename, ".test")
self.assertRaises(InvalidName, db.test.rename, "test.")
self.assertRaises(InvalidName, db.test.rename, "tes..t")
self.assertEqual(0, db.test.count())
self.assertEqual(0, db.foo.count())
for i in range(10):
db.test.insert({"x": i})
self.assertEqual(10, db.test.count())
db.test.rename("foo")
self.assertEqual(0, db.test.count())
self.assertEqual(10, db.foo.count())
x = 0
for doc in db.foo.find():
self.assertEqual(x, doc["x"])
x += 1
db.test.insert({})
self.assertRaises(OperationFailure, db.foo.rename, "test")
db.foo.rename("test", dropTarget=True)
# doesn't really test functionality, just that the option is set correctly
def test_snapshot(self):
db = self.db
self.assertRaises(TypeError, db.test.find, snapshot=5)
list(db.test.find(snapshot=True))
self.assertRaises(OperationFailure, list,
db.test.find(snapshot=True).sort("foo", 1))
def test_find_one(self):
db = self.db
db.drop_collection("test")
id = db.test.save({"hello": "world", "foo": "bar"})
self.assertEqual("world", db.test.find_one()["hello"])
self.assertEqual(db.test.find_one(id), db.test.find_one())
self.assertEqual(db.test.find_one(None), db.test.find_one())
self.assertEqual(db.test.find_one({}), db.test.find_one())
self.assertEqual(db.test.find_one({"hello": "world"}),
db.test.find_one())
self.assertTrue("hello" in db.test.find_one(fields=["hello"]))
self.assertTrue("hello" not in db.test.find_one(fields=["foo"]))
self.assertEqual(["_id"], db.test.find_one(fields=[]).keys())
self.assertEqual(None, db.test.find_one({"hello": "foo"}))
self.assertEqual(None, db.test.find_one(ObjectId()))
def test_find_one_non_objectid(self):
db = self.db
db.drop_collection("test")
db.test.save({"_id": 5})
self.assertTrue(db.test.find_one(5))
self.assertFalse(db.test.find_one(6))
def test_remove_non_objectid(self):
db = self.db
db.drop_collection("test")
db.test.save({"_id": 5})
self.assertEqual(1, db.test.count())
db.test.remove(5)
self.assertEqual(0, db.test.count())
def test_find_one_with_find_args(self):
db = self.db
db.drop_collection("test")
db.test.save({"x": 1})
db.test.save({"x": 2})
db.test.save({"x": 3})
self.assertEqual(1, db.test.find_one()["x"])
self.assertEqual(2, db.test.find_one(skip=1, limit=2)["x"])
def test_find_with_sort(self):
db = self.db
db.drop_collection("test")
db.test.save({"x": 2})
db.test.save({"x": 1})
db.test.save({"x": 3})
self.assertEqual(2, db.test.find_one()["x"])
self.assertEqual(1, db.test.find_one(sort=[("x", 1)])["x"])
self.assertEqual(3, db.test.find_one(sort=[("x", -1)])["x"])
def to_list(foo):
return [bar["x"] for bar in foo]
self.assertEqual([2, 1, 3], to_list(db.test.find()))
self.assertEqual([1, 2, 3], to_list(db.test.find(sort=[("x", 1)])))
self.assertEqual([3, 2, 1], to_list(db.test.find(sort=[("x", -1)])))
self.assertRaises(TypeError, db.test.find, sort=5)
self.assertRaises(TypeError, db.test.find, sort="hello")
self.assertRaises(ValueError, db.test.find, sort=["hello", 1])
def test_insert_adds_id(self):
doc = {"hello": "world"}
self.db.test.insert(doc)
self.assertTrue("_id" in doc)
docs = [{"hello": "world"}, {"hello": "world"}]
self.db.test.insert(docs)
for doc in docs:
self.assertTrue("_id" in doc)
def test_save_adds_id(self):
doc = {"hello": "jesse"}
self.db.test.save(doc)
self.assertTrue("_id" in doc)
# TODO doesn't actually test functionality, just that it doesn't blow up
def test_cursor_timeout(self):
list(self.db.test.find(timeout=False))
list(self.db.test.find(timeout=True))
def test_distinct(self):
if not version.at_least(self.db.connection, (1, 1)):
raise SkipTest("distinct command requires MongoDB >= 1.1")
self.db.drop_collection("test")
test = self.db.test
test.save({"a": 1})
test.save({"a": 2})
test.save({"a": 2})
test.save({"a": 2})
test.save({"a": 3})
distinct = test.distinct("a")
distinct.sort()
self.assertEqual([1, 2, 3], distinct)
distinct = test.find({'a': {'$gt': 1}}).distinct("a")
distinct.sort()
self.assertEqual([2, 3], distinct)
self.db.drop_collection("test")
test.save({"a": {"b": "a"}, "c": 12})
test.save({"a": {"b": "b"}, "c": 12})
test.save({"a": {"b": "c"}, "c": 12})
test.save({"a": {"b": "c"}, "c": 12})
distinct = test.distinct("a.b")
distinct.sort()
self.assertEqual(["a", "b", "c"], distinct)
def test_query_on_query_field(self):
self.db.drop_collection("test")
self.db.test.save({"query": "foo"})
self.db.test.save({"bar": "foo"})
self.assertEqual(1,
self.db.test.find({"query": {"$ne": None}}).count())
self.assertEqual(1,
len(list(self.db.test.find({"query": {"$ne": None}})))
)
def test_min_query(self):
self.db.drop_collection("test")
self.db.test.save({"x": 1})
self.db.test.save({"x": 2})
self.db.test.create_index("x")
self.assertEqual(1, len(list(self.db.test.find({"$min": {"x": 2},
"$query": {}}))))
self.assertEqual(2, self.db.test.find({"$min": {"x": 2},
"$query": {}})[0]["x"])
def test_insert_large_document(self):
max_size = self.db.connection.max_bson_size
half_size = int(max_size / 2)
if version.at_least(self.db.connection, (1, 7, 4)):
self.assertEqual(max_size, 16777216)
self.assertRaises(InvalidDocument, self.db.test.insert,
{"foo": "x" * max_size})
self.assertRaises(InvalidDocument, self.db.test.save,
{"foo": "x" * max_size})
self.assertRaises(InvalidDocument, self.db.test.insert,
[{"x": 1}, {"foo": "x" * max_size}])
self.db.test.insert([{"foo": "x" * half_size},
{"foo": "x" * half_size}])
self.db.test.insert({"bar": "x"})
self.assertRaises(InvalidDocument, self.db.test.update,
{"bar": "x"}, {"bar": "x" * (max_size - 14)})
self.db.test.update({"bar": "x"}, {"bar": "x" * (max_size - 15)})
def test_map_reduce(self):
if not version.at_least(self.db.connection, (1, 1, 1)):
raise SkipTest("mapReduce command requires MongoDB >= 1.1.1")
db = self.db
db.drop_collection("test")
db.test.insert({"id": 1, "tags": ["dog", "cat"]})
db.test.insert({"id": 2, "tags": ["cat"]})
db.test.insert({"id": 3, "tags": ["mouse", "cat", "dog"]})
db.test.insert({"id": 4, "tags": []})
map = Code("function () {"
" this.tags.forEach(function(z) {"
" emit(z, 1);"
" });"
"}")
reduce = Code("function (key, values) {"
" var total = 0;"
" for (var i = 0; i < values.length; i++) {"
" total += values[i];"
" }"
" return total;"
"}")
result = db.test.map_reduce(map, reduce, out='mrunittests')
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
if version.at_least(self.db.connection, (1, 7, 4)):
db.test.insert({"id": 5, "tags": ["hampster"]})
result = db.test.map_reduce(map, reduce, out='mrunittests')
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
db.test.remove({"id": 5})
result = db.test.map_reduce(map, reduce,
out={'merge': 'mrunittests'})
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
result = db.test.map_reduce(map, reduce,
out={'reduce': 'mrunittests'})
self.assertEqual(6, result.find_one({"_id": "cat"})["value"])
self.assertEqual(4, result.find_one({"_id": "dog"})["value"])
self.assertEqual(2, result.find_one({"_id": "mouse"})["value"])
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
result = db.test.map_reduce(
map,
reduce,
out={'replace': 'mrunittests'}
)
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
if (is_mongos(self.db.connection)
and not version.at_least(self.db.connection, (2, 1, 2))):
pass
else:
result = db.test.map_reduce(map, reduce,
out=SON([('replace', 'mrunittests'),
('db', 'mrtestdb')
]))
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
self.client.drop_database('mrtestdb')
full_result = db.test.map_reduce(map, reduce,
out='mrunittests', full_response=True)
self.assertEqual(6, full_result["counts"]["emit"])
result = db.test.map_reduce(map, reduce, out='mrunittests', limit=2)
self.assertEqual(2, result.find_one({"_id": "cat"})["value"])
self.assertEqual(1, result.find_one({"_id": "dog"})["value"])
self.assertEqual(None, result.find_one({"_id": "mouse"}))
if version.at_least(self.db.connection, (1, 7, 4)):
result = db.test.map_reduce(map, reduce, out={'inline': 1})
self.assertTrue(isinstance(result, dict))
self.assertTrue('results' in result)
self.assertTrue(result['results'][1]["_id"] in ("cat",
"dog",
"mouse"))
result = db.test.inline_map_reduce(map, reduce)
self.assertTrue(isinstance(result, list))
self.assertEqual(3, len(result))
self.assertTrue(result[1]["_id"] in ("cat", "dog", "mouse"))
full_result = db.test.inline_map_reduce(map, reduce,
full_response=True)
self.assertEqual(6, full_result["counts"]["emit"])
def test_messages_with_unicode_collection_names(self):
db = self.db
db[u"Employés"].insert({"x": 1})
db[u"Employés"].update({"x": 1}, {"x": 2})
db[u"Employés"].remove({})
db[u"Employés"].find_one()
list(db[u"Employés"].find())
def test_drop_indexes_non_existant(self):
self.db.drop_collection("test")
self.db.test.drop_indexes()
# This is really a bson test but easier to just reproduce it here...
# (Shame on me)
def test_bad_encode(self):
c = self.db.test
warnings.simplefilter("ignore")
self.assertRaises(InvalidDocument, c.save, {"x": c})
warnings.simplefilter("default")
def test_as_class(self):
c = self.db.test
c.drop()
c.insert({"x": 1})
doc = c.find().next()
self.assertTrue(isinstance(doc, dict))
doc = c.find().next()
self.assertFalse(isinstance(doc, SON))
doc = c.find(as_class=SON).next()
self.assertTrue(isinstance(doc, SON))
self.assertTrue(isinstance(c.find_one(), dict))
self.assertFalse(isinstance(c.find_one(), SON))
self.assertTrue(isinstance(c.find_one(as_class=SON), SON))
self.assertEqual(1, c.find_one(as_class=SON)["x"])
doc = c.find(as_class=SON).next()
self.assertEqual(1, doc["x"])
def test_find_and_modify(self):
c = self.db.test
c.drop()
c.insert({'_id': 1, 'i': 1})
# Test that we raise DuplicateKeyError when appropriate.
c.ensure_index('i', unique=True)
self.assertRaises(DuplicateKeyError,
c.find_and_modify, query={'i': 1, 'j': 1},
update={'$set': {'k': 1}}, upsert=True)
c.drop_indexes()
# Test correct findAndModify
self.assertEqual({'_id': 1, 'i': 1},
c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}))
self.assertEqual({'_id': 1, 'i': 3},
c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}},
new=True))
self.assertEqual({'_id': 1, 'i': 3},
c.find_and_modify({'_id': 1}, remove=True))
self.assertEqual(None, c.find_one({'_id': 1}))
self.assertEqual(None,
c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}))
# The return value changed in 2.1.2. See SERVER-6226.
if version.at_least(self.db.connection, (2, 1, 2)):
self.assertEqual(None, c.find_and_modify({'_id': 1},
{'$inc': {'i': 1}},
upsert=True))
else:
self.assertEqual({}, c.find_and_modify({'_id': 1},
{'$inc': {'i': 1}},
upsert=True))
self.assertEqual({'_id': 1, 'i': 2},
c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}},
upsert=True, new=True))
self.assertEqual({'_id': 1, 'i': 2},
c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}},
fields=['i']))
self.assertEqual({'_id': 1, 'i': 4},
c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}},
new=True, fields={'i': 1}))
# Test with full_response=True (version > 2.4.2)
result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}},
new=True, upsert=True,
full_response=True,
fields={'i': 1})
self.assertEqual({'_id': 1, 'i': 5}, result["value"])
self.assertEqual(True, result["lastErrorObject"]["updatedExisting"])
result = c.find_and_modify({'_id': 2}, {'$inc': {'i': 1}},
new=True, upsert=True,
full_response=True,
fields={'i': 1})
self.assertEqual({'_id': 2, 'i': 1}, result["value"])
self.assertEqual(False, result["lastErrorObject"]["updatedExisting"])
class ExtendedDict(dict):
pass
result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}},
new=True, fields={'i': 1})
self.assertFalse(isinstance(result, ExtendedDict))
result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}},
new=True, fields={'i': 1},
as_class=ExtendedDict)
self.assertTrue(isinstance(result, ExtendedDict))
def test_find_and_modify_with_sort(self):
c = self.db.test
c.drop()
for j in xrange(5):
c.insert({'j': j, 'i': 0})
sort={'j': DESCENDING}
self.assertEqual(4, c.find_and_modify({},
{'$inc': {'i': 1}},
sort=sort)['j'])
sort={'j': ASCENDING}
self.assertEqual(0, c.find_and_modify({},
{'$inc': {'i': 1}},
sort=sort)['j'])
sort=[('j', DESCENDING)]
self.assertEqual(4, c.find_and_modify({},
{'$inc': {'i': 1}},
sort=sort)['j'])
sort=[('j', ASCENDING)]
self.assertEqual(0, c.find_and_modify({},
{'$inc': {'i': 1}},
sort=sort)['j'])
sort=SON([('j', DESCENDING)])
self.assertEqual(4, c.find_and_modify({},
{'$inc': {'i': 1}},
sort=sort)['j'])
sort=SON([('j', ASCENDING)])
self.assertEqual(0, c.find_and_modify({},
{'$inc': {'i': 1}},
sort=sort)['j'])
try:
from collections import OrderedDict
sort=OrderedDict([('j', DESCENDING)])
self.assertEqual(4, c.find_and_modify({},
{'$inc': {'i': 1}},
sort=sort)['j'])
sort=OrderedDict([('j', ASCENDING)])
self.assertEqual(0, c.find_and_modify({},
{'$inc': {'i': 1}},
sort=sort)['j'])
except ImportError:
pass
# Test that a standard dict with two keys is rejected.
sort={'j': DESCENDING, 'foo': DESCENDING}
self.assertRaises(TypeError, c.find_and_modify, {},
{'$inc': {'i': 1}},
sort=sort)
def test_find_with_nested(self):
if not version.at_least(self.db.connection, (2, 0, 0)):
raise SkipTest("nested $and and $or requires MongoDB >= 2.0")
c = self.db.test
c.drop()
c.insert([{'i': i} for i in range(5)]) # [0, 1, 2, 3, 4]
self.assertEqual(
[2],
[i['i'] for i in c.find({
'$and': [
{
# This clause gives us [1,2,4]
'$or': [
{'i': {'$lte': 2}},
{'i': {'$gt': 3}},
],
},
{
# This clause gives us [2,3]
'$or': [
{'i': 2},
{'i': 3},
]
},
]
})]
)
self.assertEqual(
[0, 1, 2],
[i['i'] for i in c.find({
'$or': [
{
# This clause gives us [2]
'$and': [
{'i': {'$gte': 2}},
{'i': {'$lt': 3}},
],
},
{
# This clause gives us [0,1]
'$and': [
{'i': {'$gt': -100}},
{'i': {'$lt': 2}},
]
},
]
})]
)
def test_disabling_manipulators(self):
class IncByTwo(SONManipulator):
def transform_outgoing(self, son, collection):
if 'foo' in son:
son['foo'] += 2
return son
db = self.client.pymongo_test
db.add_son_manipulator(IncByTwo())
c = db.test
c.drop()
c.insert({'foo': 0})
self.assertEqual(2, c.find_one()['foo'])
self.assertEqual(0, c.find_one(manipulate=False)['foo'])
self.assertEqual(2, c.find_one(manipulate=True)['foo'])
c.remove({})
def test_uuid_subtype(self):
if not have_uuid:
raise SkipTest("No uuid module")
coll = self.client.pymongo_test.uuid
coll.drop()
def change_subtype(collection, subtype):
collection.uuid_subtype = subtype
# Test property
self.assertEqual(OLD_UUID_SUBTYPE, coll.uuid_subtype)
self.assertRaises(ConfigurationError, change_subtype, coll, 7)
self.assertRaises(ConfigurationError, change_subtype, coll, 2)
# Test basic query
uu = uuid.uuid4()
# Insert as binary subtype 3
coll.insert({'uu': uu})
self.assertEqual(uu, coll.find_one({'uu': uu})['uu'])
coll.uuid_subtype = UUID_SUBTYPE
self.assertEqual(UUID_SUBTYPE, coll.uuid_subtype)
self.assertEqual(None, coll.find_one({'uu': uu}))
self.assertEqual(uu, coll.find_one({'uu': UUIDLegacy(uu)})['uu'])
# Test Cursor.count
self.assertEqual(0, coll.find({'uu': uu}).count())
coll.uuid_subtype = OLD_UUID_SUBTYPE
self.assertEqual(1, coll.find({'uu': uu}).count())
# Test remove
coll.uuid_subtype = UUID_SUBTYPE
coll.remove({'uu': uu})
self.assertEqual(1, coll.count())
coll.uuid_subtype = OLD_UUID_SUBTYPE
coll.remove({'uu': uu})
self.assertEqual(0, coll.count())
# Test save
coll.insert({'_id': uu, 'i': 0})
self.assertEqual(1, coll.count())
self.assertEqual(1, coll.find({'_id': uu}).count())
self.assertEqual(0, coll.find_one({'_id': uu})['i'])
doc = coll.find_one({'_id': uu})
doc['i'] = 1
coll.save(doc)
self.assertEqual(1, coll.find_one({'_id': uu})['i'])
# Test update
coll.uuid_subtype = UUID_SUBTYPE
coll.update({'_id': uu}, {'$set': {'i': 2}})
coll.uuid_subtype = OLD_UUID_SUBTYPE
self.assertEqual(1, coll.find_one({'_id': uu})['i'])
coll.update({'_id': uu}, {'$set': {'i': 2}})
self.assertEqual(2, coll.find_one({'_id': uu})['i'])
# Test Cursor.distinct
self.assertEqual([2], coll.find({'_id': uu}).distinct('i'))
coll.uuid_subtype = UUID_SUBTYPE
self.assertEqual([], coll.find({'_id': uu}).distinct('i'))
# Test find_and_modify
self.assertEqual(None, coll.find_and_modify({'_id': uu},
{'$set': {'i': 5}}))
coll.uuid_subtype = OLD_UUID_SUBTYPE
self.assertEqual(2, coll.find_and_modify({'_id': uu},
{'$set': {'i': 5}})['i'])
self.assertEqual(5, coll.find_one({'_id': uu})['i'])
# Test command
db = self.client.pymongo_test
no_obj_error = "No matching object found"
result = db.command('findAndModify', 'uuid',
allowable_errors=[no_obj_error],
uuid_subtype=UUID_SUBTYPE,
query={'_id': uu},
update={'$set': {'i': 6}})
self.assertEqual(None, result.get('value'))
self.assertEqual(5, db.command('findAndModify', 'uuid',
update={'$set': {'i': 6}},
query={'_id': uu})['value']['i'])
self.assertEqual(6, db.command('findAndModify', 'uuid',
update={'$set': {'i': 7}},
query={'_id': UUIDLegacy(uu)}
)['value']['i'])
# Test (inline)_map_reduce
coll.drop()
coll.insert({"_id": uu, "x": 1, "tags": ["dog", "cat"]})
coll.insert({"_id": uuid.uuid4(), "x": 3,
"tags": ["mouse", "cat", "dog"]})
map = Code("function () {"
" this.tags.forEach(function(z) {"
" emit(z, 1);"
" });"
"}")
reduce = Code("function (key, values) {"
" var total = 0;"
" for (var i = 0; i < values.length; i++) {"
" total += values[i];"
" }"
" return total;"
"}")
coll.uuid_subtype = UUID_SUBTYPE
q = {"_id": uu}
if version.at_least(self.db.connection, (1, 7, 4)):
result = coll.inline_map_reduce(map, reduce, query=q)
self.assertEqual([], result)
result = coll.map_reduce(map, reduce, "results", query=q)
self.assertEqual(0, db.results.count())
coll.uuid_subtype = OLD_UUID_SUBTYPE
q = {"_id": uu}
if version.at_least(self.db.connection, (1, 7, 4)):
result = coll.inline_map_reduce(map, reduce, query=q)
self.assertEqual(2, len(result))
result = coll.map_reduce(map, reduce, "results", query=q)
self.assertEqual(2, db.results.count())
db.drop_collection("result")
coll.drop()
# Test group
coll.insert({"_id": uu, "a": 2})
coll.insert({"_id": uuid.uuid4(), "a": 1})
reduce = "function (obj, prev) { prev.count++; }"
coll.uuid_subtype = UUID_SUBTYPE
self.assertEqual([],
coll.group([], {"_id": uu},
{"count": 0}, reduce))
coll.uuid_subtype = OLD_UUID_SUBTYPE
self.assertEqual([{"count": 1}],
coll.group([], {"_id": uu},
{"count": 0}, reduce))
if __name__ == "__main__":
unittest.main()
|
PYSEC-2013-30
|
nova/tests/virt/vmwareapi/test_driver_api.py
|
@@ -34,6 +34,7 @@
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import task_states
+from nova.compute import vm_states
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
@@ -1191,6 +1192,31 @@ def test_get_info(self):
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
+ def destroy_rescued(self, fake_method):
+ self._rescue()
+ with (
+ mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+ fake_method)
+ ):
+ self.instance['vm_state'] = vm_states.RESCUED
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ inst_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(inst_path))
+ rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds,
+ self.uuid,
+ self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(rescue_file_path))
+
+ def test_destroy_rescued(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ pass
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
+ def test_destroy_rescued_with_exception(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ raise exception.NovaException('Here is my fake exception')
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
def test_destroy(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMwareAPI.
"""
import collections
import contextlib
import copy
import datetime
import time
import mock
import mox
from oslo.config import cfg
import suds
from nova import block_device
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_instance
import nova.tests.image.fake
from nova.tests import matchers
from nova.tests import test_flavors
from nova.tests import utils
from nova.tests.virt.vmwareapi import stubs
from nova import utils as nova_utils
from nova.virt import driver as v_driver
from nova.virt import fake
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import fake as vmwareapi_fake
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import vmware_images
from nova.virt.vmwareapi import volume_util
from nova.virt.vmwareapi import volumeops
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('remove_unused_original_minimum_age_seconds',
'nova.virt.imagecache')
class fake_vm_ref(object):
def __init__(self):
self.value = 4
self._type = 'VirtualMachine'
class fake_service_content(object):
def __init__(self):
self.ServiceContent = vmwareapi_fake.DataObject()
self.ServiceContent.fake = 'fake'
class VMwareSudsTest(test.NoDBTestCase):
def setUp(self):
super(VMwareSudsTest, self).setUp()
def new_client_init(self, url, **kwargs):
return
mock.patch.object(suds.client.Client,
'__init__', new=new_client_init).start()
self.vim = self._vim_create()
self.addCleanup(mock.patch.stopall)
def _vim_create(self):
def fake_retrieve_service_content(fake):
return fake_service_content()
self.stubs.Set(vim.Vim, 'retrieve_service_content',
fake_retrieve_service_content)
return vim.Vim()
def test_exception_with_deepcopy(self):
self.assertIsNotNone(self.vim)
self.assertRaises(error_util.VimException,
copy.deepcopy, self.vim)
class VMwareSessionTestCase(test.NoDBTestCase):
def _fake_is_vim_object(self, module):
return True
@mock.patch('time.sleep')
def test_call_method_vim_fault(self, mock_sleep):
def _fake_create_session(self):
session = vmwareapi_fake.DataObject()
session.key = 'fake_key'
session.userName = 'fake_username'
self._session = session
def _fake_session_is_active(self):
return False
with contextlib.nested(
mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
self._fake_is_vim_object),
mock.patch.object(driver.VMwareAPISession, '_create_session',
_fake_create_session),
mock.patch.object(driver.VMwareAPISession, '_session_is_active',
_fake_session_is_active)
) as (_fake_vim, _fake_create, _fake_is_active):
api_session = driver.VMwareAPISession()
args = ()
kwargs = {}
self.assertRaises(error_util.VimFaultException,
api_session._call_method,
stubs, 'fake_temp_method_exception',
*args, **kwargs)
def test_call_method_vim_empty(self):
def _fake_create_session(self):
session = vmwareapi_fake.DataObject()
session.key = 'fake_key'
session.userName = 'fake_username'
self._session = session
def _fake_session_is_active(self):
return True
with contextlib.nested(
mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
self._fake_is_vim_object),
mock.patch.object(driver.VMwareAPISession, '_create_session',
_fake_create_session),
mock.patch.object(driver.VMwareAPISession, '_session_is_active',
_fake_session_is_active)
) as (_fake_vim, _fake_create, _fake_is_active):
api_session = driver.VMwareAPISession()
args = ()
kwargs = {}
res = api_session._call_method(stubs, 'fake_temp_method_exception',
*args, **kwargs)
self.assertEqual([], res)
@mock.patch('time.sleep')
def test_call_method_session_exception(self, mock_sleep):
def _fake_create_session(self):
session = vmwareapi_fake.DataObject()
session.key = 'fake_key'
session.userName = 'fake_username'
self._session = session
with contextlib.nested(
mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
self._fake_is_vim_object),
mock.patch.object(driver.VMwareAPISession, '_create_session',
_fake_create_session),
) as (_fake_vim, _fake_create):
api_session = driver.VMwareAPISession()
args = ()
kwargs = {}
self.assertRaises(error_util.SessionConnectionException,
api_session._call_method,
stubs, 'fake_temp_session_exception',
*args, **kwargs)
def test_call_method_session_file_exists_exception(self):
def _fake_create_session(self):
session = vmwareapi_fake.DataObject()
session.key = 'fake_key'
session.userName = 'fake_username'
self._session = session
with contextlib.nested(
mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
self._fake_is_vim_object),
mock.patch.object(driver.VMwareAPISession, '_create_session',
_fake_create_session),
) as (_fake_vim, _fake_create):
api_session = driver.VMwareAPISession()
args = ()
kwargs = {}
self.assertRaises(error_util.FileAlreadyExistsException,
api_session._call_method,
stubs, 'fake_session_file_exception',
*args, **kwargs)
class VMwareAPIConfTestCase(test.NoDBTestCase):
"""Unit tests for VMWare API configurations."""
def setUp(self):
super(VMwareAPIConfTestCase, self).setUp()
vm_util.vm_refs_cache_reset()
def tearDown(self):
super(VMwareAPIConfTestCase, self).tearDown()
def test_configure_without_wsdl_loc_override(self):
# Test the default configuration behavior. By default,
# use the WSDL sitting on the host we are talking to in
# order to bind the SOAP client.
wsdl_loc = cfg.CONF.vmware.wsdl_location
self.assertIsNone(wsdl_loc)
wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com")
url = vim.Vim.get_soap_url("https", "www.example.com")
self.assertEqual("https://www.example.com/sdk/vimService.wsdl",
wsdl_url)
self.assertEqual("https://www.example.com/sdk", url)
def test_configure_without_wsdl_loc_override_using_ipv6(self):
# Same as above, but with an IPv6-based host IP.
wsdl_loc = cfg.CONF.vmware.wsdl_location
self.assertIsNone(wsdl_loc)
wsdl_url = vim.Vim.get_wsdl_url("https", "::1")
url = vim.Vim.get_soap_url("https", "::1")
self.assertEqual("https://[::1]/sdk/vimService.wsdl",
wsdl_url)
self.assertEqual("https://[::1]/sdk", url)
def test_configure_with_wsdl_loc_override(self):
# Use the setting vmwareapi_wsdl_loc to override the
# default path to the WSDL.
#
# This is useful as a work-around for XML parsing issues
# found when using some WSDL in combination with some XML
# parsers.
#
# The wsdl_url should point to a different host than the one we
# are actually going to send commands to.
fake_wsdl = "https://www.test.com/sdk/foo.wsdl"
self.flags(wsdl_location=fake_wsdl, group='vmware')
wsdl_loc = cfg.CONF.vmware.wsdl_location
self.assertIsNotNone(wsdl_loc)
self.assertEqual(fake_wsdl, wsdl_loc)
wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com")
url = vim.Vim.get_soap_url("https", "www.example.com")
self.assertEqual(fake_wsdl, wsdl_url)
self.assertEqual("https://www.example.com/sdk", url)
class VMwareAPIVMTestCase(test.NoDBTestCase):
"""Unit tests for Vmware API connection calls."""
def setUp(self):
super(VMwareAPIVMTestCase, self).setUp()
vm_util.vm_refs_cache_reset()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(host_ip='test_url',
host_username='test_username',
host_password='test_pass',
datastore_regex='.*',
api_retry_count=1,
use_linked_clone=False, group='vmware')
self.flags(vnc_enabled=False,
image_cache_subdirectory_name='vmware_base',
my_ip='')
self.user_id = 'fake'
self.project_id = 'fake'
self.node_name = 'test_url'
self.ds = 'ds1'
self.context = context.RequestContext(self.user_id, self.project_id)
stubs.set_stubs(self.stubs)
vmwareapi_fake.reset()
self.conn = driver.VMwareESXDriver(fake.FakeVirtAPI)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = utils.get_test_network_info()
self.image = {
'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c',
'disk_format': 'vmdk',
'size': 512,
}
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.vnc_host = 'test_url'
self._set_exception_vars()
def tearDown(self):
super(VMwareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
nova.tests.image.fake.FakeImageService_reset()
def _set_exception_vars(self):
self.wait_task = self.conn._session._wait_for_task
self.call_method = self.conn._session._call_method
self.task_ref = None
self.exception = False
def test_driver_capabilities(self):
self.assertTrue(self.conn.capabilities['has_imagecache'])
self.assertFalse(self.conn.capabilities['supports_recreate'])
def test_login_retries(self):
self.attempts = 0
self.login_session = vmwareapi_fake.FakeVim()._login()
def _fake_login(_self):
self.attempts += 1
if self.attempts == 1:
raise exception.NovaException('Here is my fake exception')
return self.login_session
def _fake_check_session(_self):
return True
self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
self.stubs.Set(time, 'sleep', lambda x: None)
self.stubs.Set(vmwareapi_fake.FakeVim, '_check_session',
_fake_check_session)
self.conn = driver.VMwareAPISession()
self.assertEqual(self.attempts, 2)
def test_wait_for_task_exception(self):
self.flags(task_poll_interval=1, group='vmware')
self.login_session = vmwareapi_fake.FakeVim()._login()
self.stop_called = 0
def _fake_login(_self):
return self.login_session
self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
def fake_poll_task(task_ref, done):
done.send_exception(exception.NovaException('fake exception'))
def fake_stop_loop(loop):
self.stop_called += 1
return loop.stop()
self.conn = driver.VMwareAPISession()
self.stubs.Set(self.conn, "_poll_task",
fake_poll_task)
self.stubs.Set(self.conn, "_stop_loop",
fake_stop_loop)
self.assertRaises(exception.NovaException,
self.conn._wait_for_task, 'fake-ref')
self.assertEqual(self.stop_called, 1)
def _get_instance_type_by_name(self, type):
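        # Look up a flavor from the test defaults; m1.micro is hand-rolled
        # here to exercise the root_gb=0 code paths.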
for instance_type in test_flavors.DEFAULT_FLAVORS:
if instance_type['name'] == type:
return instance_type
if type == 'm1.micro':
return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
'name': 'm1.micro', 'deleted': 0, 'created_at': None,
'ephemeral_gb': 0, 'updated_at': None,
'disabled': False, 'vcpus': 1, 'extra_specs': {},
'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
'flavorid': '1', 'vcpu_weight': None, 'id': 2}
def _create_instance(self, node=None, set_image_ref=True,
uuid=None, instance_type='m1.large'):
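        # Build a fake instance object; node defaults to the test node and
        # uuid to a freshly generated one when not supplied.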
if not node:
node = self.node_name
if not uuid:
uuid = uuidutils.generate_uuid()
self.type_data = self._get_instance_type_by_name(instance_type)
values = {'name': 'fake_name',
'id': 1,
'uuid': uuid,
'project_id': self.project_id,
'user_id': self.user_id,
'kernel_id': "fake_kernel_uuid",
'ramdisk_id': "fake_ramdisk_uuid",
'mac_address': "de:ad:be:ef:be:ef",
'flavor': instance_type,
'node': node,
'memory_mb': self.type_data['memory_mb'],
'root_gb': self.type_data['root_gb'],
'ephemeral_gb': self.type_data['ephemeral_gb'],
'vcpus': self.type_data['vcpus'],
'swap': self.type_data['swap'],
}
if set_image_ref:
values['image_ref'] = "fake_image_uuid"
self.instance_node = node
self.uuid = uuid
self.instance = fake_instance.fake_instance_obj(
self.context, **values)
def _create_vm(self, node=None, num_instances=1, uuid=None,
instance_type='m1.large'):
"""Create and spawn the VM."""
if not node:
node = self.node_name
self._create_instance(node=node, uuid=uuid,
instance_type=instance_type)
self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
self.conn.spawn(self.context, self.instance, self.image,
injected_files=[], admin_password=None,
network_info=self.network_info,
block_device_info=None)
self._check_vm_record(num_instances=num_instances)
self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
def _check_vm_record(self, num_instances=1):
"""Check if the spawned VM's properties correspond to the instance in
the db.
"""
instances = self.conn.list_instances()
self.assertEqual(len(instances), num_instances)
# Get Nova record for VM
vm_info = self.conn.get_info({'uuid': self.uuid,
'name': 1,
'node': self.instance_node})
        # Get the record for the VM from the fake vSphere API
vms = vmwareapi_fake._get_objects("VirtualMachine")
for vm in vms.objects:
if vm.get('name') == self.uuid:
break
# Check that m1.large above turned into the right thing.
mem_kib = long(self.type_data['memory_mb']) << 10
vcpus = self.type_data['vcpus']
self.assertEqual(vm_info['max_mem'], mem_kib)
self.assertEqual(vm_info['mem'], mem_kib)
self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid)
self.assertEqual(vm.get("summary.config.numCpu"), vcpus)
self.assertEqual(vm.get("summary.config.memorySizeMB"),
self.type_data['memory_mb'])
self.assertEqual(
vm.get("config.hardware.device")[2].device.obj_name,
"ns0:VirtualE1000")
# Check that the VM is running according to Nova
self.assertEqual(vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to vSphere API.
self.assertEqual(vm.get("runtime.powerState"), 'poweredOn')
found_vm_uuid = False
found_iface_id = False
for c in vm.get("config.extraConfig").OptionValue:
if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
found_vm_uuid = True
if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
found_iface_id = True
self.assertTrue(found_vm_uuid)
self.assertTrue(found_iface_id)
def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
"""Check if the get_info returned values correspond to the instance
object in the db.
"""
mem_kib = long(self.type_data['memory_mb']) << 10
self.assertEqual(info["state"], pwr_state)
self.assertEqual(info["max_mem"], mem_kib)
self.assertEqual(info["mem"], mem_kib)
self.assertEqual(info["num_cpu"], self.type_data['vcpus'])
def test_list_instances(self):
instances = self.conn.list_instances()
self.assertEqual(len(instances), 0)
def test_list_instances_1(self):
self._create_vm()
instances = self.conn.list_instances()
self.assertEqual(len(instances), 1)
def test_list_instance_uuids(self):
self._create_vm()
uuids = self.conn.list_instance_uuids()
self.assertEqual(len(uuids), 1)
def test_list_instance_uuids_invalid_uuid(self):
self._create_vm(uuid='fake_id')
uuids = self.conn.list_instance_uuids()
self.assertEqual(len(uuids), 0)
def _cached_files_exist(self, exists=True):
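        # Assert whether the cached image vmdk exists in the fake datastore.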
cache = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.vmdk' %
self.ds)
if exists:
self.assertTrue(vmwareapi_fake.get_file(cache))
else:
self.assertFalse(vmwareapi_fake.get_file(cache))
def test_instance_dir_disk_created(self):
"""Test image file is cached when even when use_linked_clone
is False
"""
self._create_vm()
inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
cache = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.vmdk' %
self.ds)
self.assertTrue(vmwareapi_fake.get_file(inst_file_path))
self._cached_files_exist()
def test_cache_dir_disk_created(self):
"""Test image disk is cached when use_linked_clone is True."""
self.flags(use_linked_clone=True, group='vmware')
self._create_vm()
file = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.vmdk' %
self.ds)
root = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.80.vmdk' %
self.ds)
self.assertTrue(vmwareapi_fake.get_file(file))
self.assertTrue(vmwareapi_fake.get_file(root))
def _iso_disk_type_created(self, instance_type='m1.large'):
self.image['disk_format'] = 'iso'
self._create_vm(instance_type=instance_type)
file = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.iso' %
self.ds)
self.assertTrue(vmwareapi_fake.get_file(file))
def test_iso_disk_type_created(self):
self._iso_disk_type_created()
vmdk_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
self.assertTrue(vmwareapi_fake.get_file(vmdk_file_path))
def test_iso_disk_type_created_with_root_gb_0(self):
self._iso_disk_type_created(instance_type='m1.micro')
vmdk_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
self.assertFalse(vmwareapi_fake.get_file(vmdk_file_path))
def test_iso_disk_cdrom_attach(self):
self.iso_path = (
'[%s] vmware_base/fake_image_uuid/fake_image_uuid.iso' % self.ds)
def fake_attach_cdrom(vm_ref, instance, data_store_ref,
iso_uploaded_path):
self.assertEqual(iso_uploaded_path, self.iso_path)
self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
fake_attach_cdrom)
self.image['disk_format'] = 'iso'
self._create_vm()
def test_iso_disk_cdrom_attach_with_config_drive(self):
self.flags(force_config_drive=True)
self.iso_path = [
('[%s] vmware_base/fake_image_uuid/fake_image_uuid.iso' %
self.ds),
'[%s] fake-config-drive' % self.ds]
self.iso_unit_nos = [0, 1]
self.iso_index = 0
def fake_create_config_drive(instance, injected_files, password,
data_store_name, folder, uuid, cookies):
return 'fake-config-drive'
def fake_attach_cdrom(vm_ref, instance, data_store_ref,
iso_uploaded_path):
self.assertEqual(iso_uploaded_path, self.iso_path[self.iso_index])
self.iso_index += 1
self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
fake_attach_cdrom)
self.stubs.Set(self.conn._vmops, '_create_config_drive',
fake_create_config_drive)
self.image['disk_format'] = 'iso'
self._create_vm()
self.assertEqual(self.iso_index, 2)
def test_cdrom_attach_with_config_drive(self):
self.flags(force_config_drive=True)
self.iso_path = '[%s] fake-config-drive' % self.ds
self.cd_attach_called = False
def fake_create_config_drive(instance, injected_files, password,
data_store_name, folder, uuid, cookies):
return 'fake-config-drive'
def fake_attach_cdrom(vm_ref, instance, data_store_ref,
iso_uploaded_path):
self.assertEqual(iso_uploaded_path, self.iso_path)
self.cd_attach_called = True
self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
fake_attach_cdrom)
self.stubs.Set(self.conn._vmops, '_create_config_drive',
fake_create_config_drive)
self._create_vm()
self.assertTrue(self.cd_attach_called)
def test_spawn(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def _spawn_with_delete_exception(self, fault=None):
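        # Make DeleteDatastoreFile_Task fail with the given fault. Spawn is
        # expected to succeed for specific faults and to raise
        # VMwareDriverException when no fault (a generic error) is used.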
def fake_call_method(module, method, *args, **kwargs):
task_ref = self.call_method(module, method, *args, **kwargs)
if method == "DeleteDatastoreFile_Task":
self.exception = True
task_mdo = vmwareapi_fake.create_task(method, "error",
error_fault=fault)
return task_mdo.obj
return task_ref
with (
mock.patch.object(self.conn._session, '_call_method',
fake_call_method)
):
if fault:
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
else:
self.assertRaises(error_util.VMwareDriverException,
self._create_vm)
self.assertTrue(self.exception)
def test_spawn_with_delete_exception_not_found(self):
self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound())
def test_spawn_with_delete_exception_file_fault(self):
self._spawn_with_delete_exception(vmwareapi_fake.FileFault())
def test_spawn_with_delete_exception_cannot_delete_file(self):
self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile())
def test_spawn_with_delete_exception_file_locked(self):
self._spawn_with_delete_exception(vmwareapi_fake.FileLocked())
def test_spawn_with_delete_exception_general(self):
self._spawn_with_delete_exception()
def test_spawn_disk_extend(self):
self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
requested_size = 80 * units.Mi
self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
requested_size, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_spawn_disk_extend_exists(self):
root = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.80.vmdk' %
self.ds)
self.root = root
def _fake_extend(instance, requested_size, name, dc_ref):
vmwareapi_fake._add_file(self.root)
self.stubs.Set(self.conn._vmops, '_extend_virtual_disk',
_fake_extend)
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.assertTrue(vmwareapi_fake.get_file(root))
def test_spawn_disk_extend_sparse(self):
self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties')
result = [1024, {"vmware_ostype": "otherGuest",
"vmware_adaptertype": "lsiLogic",
"vmware_disktype": "sparse"}]
vmware_images.get_vmdk_size_and_properties(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(result)
self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
requested_size = 80 * units.Mi
self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
requested_size, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_spawn_disk_extend_insufficient_disk_space(self):
self.flags(use_linked_clone=True, group='vmware')
self.wait_task = self.conn._session._wait_for_task
self.call_method = self.conn._session._call_method
self.task_ref = None
id = 'fake_image_uuid'
cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
id, id)
tmp_file = '[%s] vmware_base/%s/%s.80-flat.vmdk' % (self.ds,
id, id)
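        # Fail the extend task once the cached image and temporary flat file
        # exist, then verify both are cleaned up after the failed spawn.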
def fake_wait_for_task(task_ref):
if task_ref == self.task_ref:
self.task_ref = None
self.assertTrue(vmwareapi_fake.get_file(cached_image))
self.assertTrue(vmwareapi_fake.get_file(tmp_file))
raise exception.NovaException('No space!')
return self.wait_task(task_ref)
def fake_call_method(module, method, *args, **kwargs):
task_ref = self.call_method(module, method, *args, **kwargs)
if method == "ExtendVirtualDisk_Task":
self.task_ref = task_ref
return task_ref
self.stubs.Set(self.conn._session, "_call_method", fake_call_method)
self.stubs.Set(self.conn._session, "_wait_for_task",
fake_wait_for_task)
self.assertRaises(exception.NovaException,
self._create_vm)
self.assertFalse(vmwareapi_fake.get_file(cached_image))
self.assertFalse(vmwareapi_fake.get_file(tmp_file))
def test_spawn_disk_invalid_disk_size(self):
self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties')
result = [82 * units.Gi,
{"vmware_ostype": "otherGuest",
"vmware_adaptertype": "lsiLogic",
"vmware_disktype": "sparse"}]
vmware_images.get_vmdk_size_and_properties(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(result)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceUnacceptable,
self._create_vm)
def test_spawn_invalid_disk_format(self):
self._create_instance()
self.image['disk_format'] = 'invalid'
self.assertRaises(exception.InvalidDiskFormat,
self.conn.spawn, self.context,
self.instance, self.image,
injected_files=[], admin_password=None,
network_info=self.network_info,
block_device_info=None)
def test_spawn_with_move_file_exists_exception(self):
# The test will validate that the spawn completes
# successfully. The "MoveDatastoreFile_Task" will
        # raise a file-exists exception. The flag
# self.exception will be checked to see that
# the exception has indeed been raised.
def fake_wait_for_task(task_ref):
if task_ref == self.task_ref:
self.task_ref = None
self.exception = True
raise error_util.FileAlreadyExistsException()
return self.wait_task(task_ref)
def fake_call_method(module, method, *args, **kwargs):
task_ref = self.call_method(module, method, *args, **kwargs)
if method == "MoveDatastoreFile_Task":
self.task_ref = task_ref
return task_ref
with contextlib.nested(
mock.patch.object(self.conn._session, '_wait_for_task',
fake_wait_for_task),
mock.patch.object(self.conn._session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.assertTrue(self.exception)
def test_spawn_with_move_general_exception(self):
# The test will validate that the spawn completes
# successfully. The "MoveDatastoreFile_Task" will
# raise a general exception. The flag self.exception
# will be checked to see that the exception has
# indeed been raised.
def fake_wait_for_task(task_ref):
if task_ref == self.task_ref:
self.task_ref = None
self.exception = True
raise error_util.VMwareDriverException('Exception!')
return self.wait_task(task_ref)
def fake_call_method(module, method, *args, **kwargs):
task_ref = self.call_method(module, method, *args, **kwargs)
if method == "MoveDatastoreFile_Task":
self.task_ref = task_ref
return task_ref
with contextlib.nested(
mock.patch.object(self.conn._session, '_wait_for_task',
fake_wait_for_task),
mock.patch.object(self.conn._session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
self.assertRaises(error_util.VMwareDriverException,
self._create_vm)
self.assertTrue(self.exception)
def test_spawn_with_move_poll_exception(self):
self.call_method = self.conn._session._call_method
def fake_call_method(module, method, *args, **kwargs):
task_ref = self.call_method(module, method, *args, **kwargs)
if method == "MoveDatastoreFile_Task":
task_mdo = vmwareapi_fake.create_task(method, "error")
return task_mdo.obj
return task_ref
with (
mock.patch.object(self.conn._session, '_call_method',
fake_call_method)
):
self.assertRaises(error_util.VMwareDriverException,
self._create_vm)
def test_spawn_with_move_file_exists_poll_exception(self):
# The test will validate that the spawn completes
# successfully. The "MoveDatastoreFile_Task" will
# raise a file exists exception. The flag self.exception
# will be checked to see that the exception has
# indeed been raised.
def fake_call_method(module, method, *args, **kwargs):
task_ref = self.call_method(module, method, *args, **kwargs)
if method == "MoveDatastoreFile_Task":
self.exception = True
task_mdo = vmwareapi_fake.create_task(method, "error",
error_fault=vmwareapi_fake.FileAlreadyExists())
return task_mdo.obj
return task_ref
with (
mock.patch.object(self.conn._session, '_call_method',
fake_call_method)
):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.assertTrue(self.exception)
def _spawn_attach_volume_vmdk(self, set_image_ref=True, vc_support=False):
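        # Spawn an instance whose root disk is a vmdk volume; the volume
        # attach (and, for VC, the volume relocation) is stubbed with mox.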
self._create_instance(set_image_ref=set_image_ref)
self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
connection_info = self._test_vmdk_connection_info('vmdk')
root_disk = [{'connection_info': connection_info}]
v_driver.block_device_info_get_mapping(
mox.IgnoreArg()).AndReturn(root_disk)
if vc_support:
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_get_res_pool_of_vm')
volumeops.VMwareVolumeOps._get_res_pool_of_vm(
mox.IgnoreArg()).AndReturn('fake_res_pool')
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_relocate_vmdk_volume')
volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(),
'fake_res_pool', mox.IgnoreArg())
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'attach_volume')
volumeops.VMwareVolumeOps.attach_volume(connection_info,
self.instance, mox.IgnoreArg())
self.mox.ReplayAll()
block_device_info = {'mount_device': 'vda'}
self.conn.spawn(self.context, self.instance, self.image,
injected_files=[], admin_password=None,
network_info=self.network_info,
block_device_info=block_device_info)
def test_spawn_attach_volume_vmdk(self):
self._spawn_attach_volume_vmdk()
def test_spawn_attach_volume_vmdk_no_image_ref(self):
self._spawn_attach_volume_vmdk(set_image_ref=False)
def test_spawn_attach_volume_iscsi(self):
self._create_instance()
self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
connection_info = self._test_vmdk_connection_info('iscsi')
root_disk = [{'connection_info': connection_info}]
v_driver.block_device_info_get_mapping(
mox.IgnoreArg()).AndReturn(root_disk)
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'attach_volume')
volumeops.VMwareVolumeOps.attach_volume(connection_info,
self.instance, mox.IgnoreArg())
self.mox.ReplayAll()
block_device_info = {'mount_device': 'vda'}
self.conn.spawn(self.context, self.instance, self.image,
injected_files=[], admin_password=None,
network_info=self.network_info,
block_device_info=block_device_info)
def mock_upload_image(self, context, image, instance, **kwargs):
self.assertEqual(image, 'Test-Snapshot')
self.assertEqual(instance, self.instance)
self.assertEqual(kwargs['disk_type'], 'preallocated')
def test_get_vm_ref_using_extra_config(self):
self._create_vm()
vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
self.instance['uuid'])
self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
# Disrupt the fake Virtual Machine object so that extraConfig
# cannot be matched.
fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = ""
# We should not get a Virtual Machine through extraConfig.
vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
self.instance['uuid'])
self.assertIsNone(vm_ref, 'VM Reference should be none')
# Check if we can find the Virtual Machine using the name.
vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
def test_search_vm_ref_by_identifier(self):
self._create_vm()
vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session,
self.instance['uuid'])
self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
fake_vm.set("summary.config.instanceUuid", "foo")
fake_vm.set("name", "foo")
fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo"
self.assertIsNone(vm_util.search_vm_ref_by_identifier(
self.conn._session, self.instance['uuid']),
"VM Reference should be none")
self.assertIsNotNone(
vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"),
"VM Reference should not be none")
def test_get_object_for_optionvalue(self):
self._create_vm()
vms = self.conn._session._call_method(vim_util, "get_objects",
"VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
vm_ref = vm_util._get_object_for_optionvalue(vms,
self.instance["uuid"])
self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
def _test_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
with mock.patch.object(vmware_images, 'upload_image',
self.mock_upload_image):
self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
func_call_matcher.call)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.assertIsNone(func_call_matcher.match())
def test_snapshot(self):
self._create_vm()
self._test_snapshot()
def test_snapshot_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
self.context, self.instance, "Test-Snapshot",
lambda *args, **kwargs: None)
def test_snapshot_delete_vm_snapshot(self):
self._create_vm()
fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
snapshot_ref = vmwareapi_fake.ManagedObjectReference(
value="Snapshot-123",
name="VirtualMachineSnapshot")
self.mox.StubOutWithMock(vmops.VMwareVMOps,
'_create_vm_snapshot')
self.conn._vmops._create_vm_snapshot(
self.instance, fake_vm).AndReturn(snapshot_ref)
self.mox.StubOutWithMock(vmops.VMwareVMOps,
'_delete_vm_snapshot')
self.conn._vmops._delete_vm_snapshot(
self.instance, fake_vm, snapshot_ref).AndReturn(None)
self.mox.ReplayAll()
self._test_snapshot()
def test_reboot(self):
self._create_vm()
info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_with_uuid(self):
"""Test fall back to use name when can't find by uuid."""
self._create_vm()
info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
self.context, self.instance, self.network_info,
'SOFT')
def test_poll_rebooting_instances(self):
self.mox.StubOutWithMock(compute_api.API, 'reboot')
compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
self._create_vm()
instances = [self.instance]
self.conn.poll_rebooting_instances(60, instances)
def test_reboot_not_poweredon(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.context, self.instance, self.network_info,
'SOFT')
def test_suspend(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.SUSPENDED)
def test_suspend_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
self.instance)
def test_resume(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.context, self.instance, self.network_info)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_resume_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.resume,
self.context, self.instance, self.network_info)
def test_resume_not_suspended(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.context, self.instance, self.network_info)
def test_power_on(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.context, self.instance, self.network_info)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_power_on_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
self.context, self.instance, self.network_info)
def test_power_off(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.SHUTDOWN)
def test_power_off_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
self.instance)
def test_power_off_suspended(self):
self._create_vm()
self.conn.suspend(self.instance)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstancePowerOffFailure,
self.conn.power_off, self.instance)
def test_resume_state_on_host_boot(self):
self._create_vm()
self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
self.mox.StubOutWithMock(self.conn, "reboot")
vm_util.get_vm_state_from_name(mox.IgnoreArg(),
self.instance['uuid']).AndReturn("poweredOff")
self.conn.reboot(self.context, self.instance, 'network_info',
'hard', None)
self.mox.ReplayAll()
self.conn.resume_state_on_host_boot(self.context, self.instance,
'network_info')
def test_resume_state_on_host_boot_no_reboot_1(self):
"""Don't call reboot on instance which is poweredon."""
self._create_vm()
self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
self.mox.StubOutWithMock(self.conn, 'reboot')
vm_util.get_vm_state_from_name(mox.IgnoreArg(),
self.instance['uuid']).AndReturn("poweredOn")
self.mox.ReplayAll()
self.conn.resume_state_on_host_boot(self.context, self.instance,
'network_info')
def test_resume_state_on_host_boot_no_reboot_2(self):
"""Don't call reboot on instance which is suspended."""
self._create_vm()
self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
self.mox.StubOutWithMock(self.conn, 'reboot')
vm_util.get_vm_state_from_name(mox.IgnoreArg(),
self.instance['uuid']).AndReturn("suspended")
self.mox.ReplayAll()
self.conn.resume_state_on_host_boot(self.context, self.instance,
'network_info')
def test_get_info(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_destroy(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEqual(len(instances), 1)
self.conn.destroy(self.context, self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEqual(len(instances), 0)
self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
def test_destroy_no_datastore(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEqual(len(instances), 1)
# Overwrite the vmPathName
vms = vmwareapi_fake._get_objects("VirtualMachine")
vm = vms.objects[0]
vm.set("config.files.vmPathName", None)
self.conn.destroy(self.context, self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEqual(len(instances), 0)
def test_destroy_non_existent(self):
self._create_instance()
self.assertIsNone(self.conn.destroy(self.context, self.instance,
self.network_info))
def _rescue(self, config_drive=False):
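        # Spawn a VM and rescue it: the original VM should end up powered
        # off while the '<uuid>-rescue' VM is running.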
def fake_attach_disk_to_vm(vm_ref, instance,
adapter_type, disk_type, vmdk_path=None,
disk_size=None, linked_clone=False,
controller_key=None, unit_number=None,
device_name=None):
info = self.conn.get_info(instance)
self._check_vm_info(info, power_state.SHUTDOWN)
if config_drive:
def fake_create_config_drive(instance, injected_files, password,
data_store_name, folder,
instance_uuid, cookies):
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
self.stubs.Set(self.conn._vmops, '_create_config_drive',
fake_create_config_drive)
self._create_vm()
info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm",
fake_attach_disk_to_vm)
self.conn.rescue(self.context, self.instance, self.network_info,
self.image, 'fake-password')
info = self.conn.get_info({'name': '1-rescue',
'uuid': '%s-rescue' % self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.SHUTDOWN)
self.assertIsNotNone(vm_util.vm_ref_cache_get('%s-rescue' % self.uuid))
def test_rescue(self):
self._rescue()
inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
self.assertTrue(vmwareapi_fake.get_file(inst_file_path))
rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds,
self.uuid,
self.uuid)
self.assertTrue(vmwareapi_fake.get_file(rescue_file_path))
def test_rescue_with_config_drive(self):
self.flags(force_config_drive=True)
self._rescue(config_drive=True)
def test_unrescue(self):
self._rescue()
self.test_vm_ref = None
self.test_device_name = None
def fake_power_off_vm_ref(vm_ref):
self.test_vm_ref = vm_ref
self.assertIsNotNone(vm_ref)
def fake_detach_disk_from_vm(vm_ref, instance,
device_name, destroy_disk=False):
self.test_device_name = device_name
info = self.conn.get_info(instance)
self._check_vm_info(info, power_state.SHUTDOWN)
with contextlib.nested(
mock.patch.object(self.conn._vmops, "_power_off_vm_ref",
side_effect=fake_power_off_vm_ref),
mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
side_effect=fake_detach_disk_from_vm),
) as (poweroff, detach):
self.conn.unrescue(self.instance, None)
poweroff.assert_called_once_with(self.test_vm_ref)
detach.assert_called_once_with(self.test_vm_ref, mock.ANY,
self.test_device_name)
self.test_vm_ref = None
self.test_device_name = None
info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_pause(self):
# Tests that the VMwareESXDriver does not implement the pause method.
self.assertRaises(NotImplementedError, self.conn.pause, instance=None)
def test_unpause(self):
# Tests that the VMwareESXDriver does not implement the unpause method.
self.assertRaises(NotImplementedError, self.conn.unpause,
instance=None)
def test_get_diagnostics(self):
self._create_vm()
expected = {'memoryReservation': 0, 'suspendInterval': 0,
'maxCpuUsage': 2000, 'toolsInstallerMounted': False,
'consumedOverheadMemory': 20, 'numEthernetCards': 1,
'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}],
'memoryOverhead': 21417984,
'guestMemoryUsage': 0, 'connectionState': 'connected',
'memorySizeMB': 512, 'balloonedMemory': 0,
'vmPathName': 'fake_path', 'template': False,
'overallCpuUsage': 0, 'powerState': 'poweredOn',
'cpuReservation': 0, 'overallCpuDemand': 0,
'numVirtualDisks': 1, 'hostMemoryUsage': 141}
expected = dict([('vmware:' + k, v) for k, v in expected.items()])
self.assertThat(
self.conn.get_diagnostics({'name': 1, 'uuid': self.uuid,
'node': self.instance_node}),
matchers.DictMatches(expected))
def test_get_console_output(self):
self.assertRaises(NotImplementedError, self.conn.get_console_output,
None, None)
def _test_finish_migration(self, power_on, resize_instance=False):
"""Tests the finish_migration method on vmops."""
self.power_on_called = False
self.wait_for_task = False
self.wait_task = self.conn._session._wait_for_task
def fake_power_on(instance):
self.assertEqual(self.instance, instance)
self.power_on_called = True
def fake_vmops_update_instance_progress(context, instance, step,
total_steps):
self.assertEqual(self.context, context)
self.assertEqual(self.instance, instance)
self.assertEqual(4, step)
self.assertEqual(vmops.RESIZE_TOTAL_STEPS, total_steps)
if resize_instance:
def fake_wait_for_task(task_ref):
self.wait_for_task = True
return self.wait_task(task_ref)
self.stubs.Set(self.conn._session, "_wait_for_task",
fake_wait_for_task)
self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on)
self.stubs.Set(self.conn._vmops, "_update_instance_progress",
fake_vmops_update_instance_progress)
# setup the test instance in the database
self._create_vm()
# perform the migration on our stubbed methods
self.conn.finish_migration(context=self.context,
migration=None,
instance=self.instance,
disk_info=None,
network_info=None,
block_device_info=None,
resize_instance=resize_instance,
image_meta=None,
power_on=power_on)
if resize_instance:
self.assertTrue(self.wait_for_task)
else:
self.assertFalse(self.wait_for_task)
def test_finish_migration_power_on(self):
self.assertRaises(NotImplementedError,
self._test_finish_migration, power_on=True)
def test_finish_migration_power_off(self):
self.assertRaises(NotImplementedError,
self._test_finish_migration, power_on=False)
def test_confirm_migration(self):
self._create_vm()
self.assertRaises(NotImplementedError,
self.conn.confirm_migration, self.context,
self.instance, None)
def _test_finish_revert_migration(self, power_on):
"""Tests the finish_revert_migration method on vmops."""
# setup the test instance in the database
self._create_vm()
self.power_on_called = False
self.vm_name = str(self.instance['name']) + '-orig'
def fake_power_on(instance):
self.assertEqual(self.instance, instance)
self.power_on_called = True
def fake_get_orig_vm_name_label(instance):
self.assertEqual(self.instance, instance)
return self.vm_name
def fake_get_vm_ref_from_name(session, vm_name):
self.assertEqual(self.vm_name, vm_name)
return vmwareapi_fake._get_objects("VirtualMachine").objects[0]
def fake_get_vm_ref_from_uuid(session, vm_uuid):
return vmwareapi_fake._get_objects("VirtualMachine").objects[0]
def fake_call_method(*args, **kwargs):
pass
def fake_wait_for_task(*args, **kwargs):
pass
self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on)
self.stubs.Set(self.conn._vmops, "_get_orig_vm_name_label",
fake_get_orig_vm_name_label)
self.stubs.Set(vm_util, "_get_vm_ref_from_uuid",
fake_get_vm_ref_from_uuid)
self.stubs.Set(vm_util, "get_vm_ref_from_name",
fake_get_vm_ref_from_name)
self.stubs.Set(self.conn._session, "_call_method", fake_call_method)
self.stubs.Set(self.conn._session, "_wait_for_task",
fake_wait_for_task)
# perform the revert on our stubbed methods
self.conn.finish_revert_migration(self.context,
instance=self.instance,
network_info=None,
power_on=power_on)
    def test_finish_revert_migration_power_on(self):
        self.assertRaises(NotImplementedError,
                          self._test_finish_revert_migration, power_on=True)
    def test_finish_revert_migration_power_off(self):
        self.assertRaises(NotImplementedError,
                          self._test_finish_revert_migration, power_on=False)
def test_get_console_pool_info(self):
info = self.conn.get_console_pool_info("console_type")
self.assertEqual(info['address'], 'test_url')
self.assertEqual(info['username'], 'test_username')
self.assertEqual(info['password'], 'test_pass')
def test_get_vnc_console_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
self.conn.get_vnc_console,
self.context,
self.instance)
def _test_get_vnc_console(self):
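        # Give the fake VM a VNC port via extraConfig and check that
        # get_vnc_console returns that host/port pair.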
self._create_vm()
fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
opt_val = OptionValue(key='', value=5906)
fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
vnc_dict = self.conn.get_vnc_console(self.context, self.instance)
self.assertEqual(vnc_dict['host'], self.vnc_host)
self.assertEqual(vnc_dict['port'], 5906)
def test_get_vnc_console(self):
self._test_get_vnc_console()
def test_get_vnc_console_noport(self):
self._create_vm()
fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
self.assertRaises(exception.ConsoleTypeUnavailable,
self.conn.get_vnc_console,
self.context,
self.instance)
def test_host_ip_addr(self):
self.assertEqual(self.conn.get_host_ip_addr(), "test_url")
def test_get_volume_connector(self):
self._create_vm()
connector_dict = self.conn.get_volume_connector(self.instance)
fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
fake_vm_id = fake_vm.obj.value
self.assertEqual(connector_dict['ip'], 'test_url')
self.assertEqual(connector_dict['initiator'], 'iscsi-name')
self.assertEqual(connector_dict['host'], 'test_url')
self.assertEqual(connector_dict['instance'], fake_vm_id)
def _test_vmdk_connection_info(self, type):
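        # Minimal connection_info dict for the given driver_volume_type.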
return {'driver_volume_type': type,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
def test_volume_attach_vmdk(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('vmdk')
mount_point = '/dev/vdc'
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_attach_volume_vmdk')
volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info,
self.instance, mount_point)
self.mox.ReplayAll()
self.conn.attach_volume(None, connection_info, self.instance,
mount_point)
def test_volume_detach_vmdk(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('vmdk')
mount_point = '/dev/vdc'
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_detach_volume_vmdk')
volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info,
self.instance, mount_point)
self.mox.ReplayAll()
self.conn.detach_volume(connection_info, self.instance, mount_point,
encryption=None)
def test_attach_vmdk_disk_to_vm(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('vmdk')
mount_point = '/dev/vdc'
# create fake backing info
volume_device = vmwareapi_fake.DataObject()
volume_device.backing = vmwareapi_fake.DataObject()
volume_device.backing.fileName = 'fake_path'
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
volumeops.VMwareVolumeOps._get_vmdk_base_volume_device(
mox.IgnoreArg()).AndReturn(volume_device)
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'attach_disk_to_vm')
volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
self.instance, mox.IgnoreArg(), mox.IgnoreArg(),
vmdk_path='fake_path')
self.mox.ReplayAll()
self.conn.attach_volume(None, connection_info, self.instance,
mount_point)
def test_detach_vmdk_disk_from_vm(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('vmdk')
mount_point = '/dev/vdc'
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_get_volume_uuid')
volumeops.VMwareVolumeOps._get_volume_uuid(mox.IgnoreArg(),
'volume-fake-id').AndReturn('fake_disk_uuid')
self.mox.StubOutWithMock(vm_util, 'get_vmdk_backed_disk_device')
vm_util.get_vmdk_backed_disk_device(mox.IgnoreArg(),
'fake_disk_uuid').AndReturn('fake_device')
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_consolidate_vmdk_volume')
volumeops.VMwareVolumeOps._consolidate_vmdk_volume(self.instance,
mox.IgnoreArg(), 'fake_device', mox.IgnoreArg())
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'detach_disk_from_vm')
volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
self.instance, mox.IgnoreArg())
self.mox.ReplayAll()
self.conn.detach_volume(connection_info, self.instance, mount_point,
encryption=None)
def test_volume_attach_iscsi(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('iscsi')
mount_point = '/dev/vdc'
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_attach_volume_iscsi')
volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info,
self.instance, mount_point)
self.mox.ReplayAll()
self.conn.attach_volume(None, connection_info, self.instance,
mount_point)
def test_volume_detach_iscsi(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('iscsi')
mount_point = '/dev/vdc'
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'_detach_volume_iscsi')
volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info,
self.instance, mount_point)
self.mox.ReplayAll()
self.conn.detach_volume(connection_info, self.instance, mount_point,
encryption=None)
def test_attach_iscsi_disk_to_vm(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('iscsi')
connection_info['data']['target_portal'] = 'fake_target_host:port'
connection_info['data']['target_iqn'] = 'fake_target_iqn'
mount_point = '/dev/vdc'
discover = ('fake_name', 'fake_uuid')
self.mox.StubOutWithMock(volume_util, 'find_st')
# simulate target not found
volume_util.find_st(mox.IgnoreArg(), connection_info['data'],
mox.IgnoreArg()).AndReturn((None, None))
self.mox.StubOutWithMock(volume_util, '_add_iscsi_send_target_host')
# rescan gets called with target portal
volume_util.rescan_iscsi_hba(
self.conn._session,
target_portal=connection_info['data']['target_portal'])
# simulate target found
volume_util.find_st(mox.IgnoreArg(), connection_info['data'],
mox.IgnoreArg()).AndReturn(discover)
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'attach_disk_to_vm')
volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
self.instance, mox.IgnoreArg(), 'rdmp',
device_name=mox.IgnoreArg())
self.mox.ReplayAll()
self.conn.attach_volume(None, connection_info, self.instance,
mount_point)
def test_rescan_iscsi_hba(self):
fake_target_portal = 'fake_target_host:port'
host_storage_sys = vmwareapi_fake._get_objects(
"HostStorageSystem").objects[0]
iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
'.hostBusAdapter')
iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
# Check the host system does not have the send target
self.assertRaises(AttributeError, getattr, iscsi_hba,
'configuredSendTarget')
# Rescan HBA with the target portal
volume_util.rescan_iscsi_hba(self.conn._session, None,
fake_target_portal)
# Check if HBA has the target portal configured
self.assertEqual('fake_target_host',
iscsi_hba.configuredSendTarget[0].address)
# Rescan HBA with same portal
volume_util.rescan_iscsi_hba(self.conn._session, None,
fake_target_portal)
self.assertEqual(1, len(iscsi_hba.configuredSendTarget))
def test_find_st(self):
data = {'target_portal': 'fake_target_host:port',
'target_iqn': 'fake_target_iqn'}
host = vmwareapi_fake._get_objects('HostSystem').objects[0]
host._add_iscsi_target(data)
result = volume_util.find_st(self.conn._session, data)
self.assertEqual(('fake-device', 'fake-uuid'), result)
def test_detach_iscsi_disk_from_vm(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('iscsi')
connection_info['data']['target_portal'] = 'fake_target_portal'
connection_info['data']['target_iqn'] = 'fake_target_iqn'
mount_point = '/dev/vdc'
find = ('fake_name', 'fake_uuid')
self.mox.StubOutWithMock(volume_util, 'find_st')
volume_util.find_st(mox.IgnoreArg(), connection_info['data'],
mox.IgnoreArg()).AndReturn(find)
self.mox.StubOutWithMock(vm_util, 'get_rdm_disk')
device = 'fake_device'
vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device)
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
'detach_disk_from_vm')
volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
self.instance, device, destroy_disk=True)
self.mox.ReplayAll()
self.conn.detach_volume(connection_info, self.instance, mount_point,
encryption=None)
def test_connection_info_get(self):
self._create_vm()
connector = self.conn.get_volume_connector(self.instance)
self.assertEqual(connector['ip'], 'test_url')
self.assertEqual(connector['host'], 'test_url')
self.assertEqual(connector['initiator'], 'iscsi-name')
self.assertIn('instance', connector)
def test_connection_info_get_after_destroy(self):
self._create_vm()
self.conn.destroy(self.context, self.instance, self.network_info)
connector = self.conn.get_volume_connector(self.instance)
self.assertEqual(connector['ip'], 'test_url')
self.assertEqual(connector['host'], 'test_url')
self.assertEqual(connector['initiator'], 'iscsi-name')
self.assertNotIn('instance', connector)
def test_refresh_instance_security_rules(self):
self.assertRaises(NotImplementedError,
self.conn.refresh_instance_security_rules,
instance=None)
def test_image_aging_image_used(self):
self._create_vm()
all_instances = [self.instance]
self.conn.manage_image_cache(self.context, all_instances)
self._cached_files_exist()
def _get_timestamp_filename(self):
return '%s%s' % (imagecache.TIMESTAMP_PREFIX,
timeutils.strtime(at=self.old_time,
fmt=imagecache.TIMESTAMP_FORMAT))
def _override_time(self):
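        # Pin the image cache timestamp filename to a fixed 'old' time.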
self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00)
def _fake_get_timestamp_filename(fake):
return self._get_timestamp_filename()
self.stubs.Set(imagecache.ImageCacheManager, '_get_timestamp_filename',
_fake_get_timestamp_filename)
def _timestamp_file_exists(self, exists=True):
timestamp = ('[%s] vmware_base/fake_image_uuid/%s/' %
(self.ds, self._get_timestamp_filename()))
if exists:
self.assertTrue(vmwareapi_fake.get_file(timestamp))
else:
self.assertFalse(vmwareapi_fake.get_file(timestamp))
def _image_aging_image_marked_for_deletion(self):
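        # With no instances passed to the cache manager, the cached image is
        # kept but marked with a timestamp file for later deletion.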
self._create_vm(uuid=uuidutils.generate_uuid())
self._cached_files_exist()
all_instances = []
self.conn.manage_image_cache(self.context, all_instances)
self._cached_files_exist()
self._timestamp_file_exists()
def test_image_aging_image_marked_for_deletion(self):
self._override_time()
self._image_aging_image_marked_for_deletion()
def _timestamp_file_removed(self):
self._override_time()
self._image_aging_image_marked_for_deletion()
self._create_vm(num_instances=2,
uuid=uuidutils.generate_uuid())
self._timestamp_file_exists(exists=False)
def test_timestamp_file_removed_spawn(self):
self._timestamp_file_removed()
def test_timestamp_file_removed_aging(self):
self._timestamp_file_removed()
ts = self._get_timestamp_filename()
ts_path = ('[%s] vmware_base/fake_image_uuid/%s/' %
(self.ds, ts))
vmwareapi_fake._add_file(ts_path)
self._timestamp_file_exists()
all_instances = [self.instance]
self.conn.manage_image_cache(self.context, all_instances)
self._timestamp_file_exists(exists=False)
def test_image_aging_disabled(self):
self._override_time()
self.flags(remove_unused_base_images=False)
self._create_vm()
self._cached_files_exist()
all_instances = []
self.conn.manage_image_cache(self.context, all_instances)
self._cached_files_exist(exists=True)
self._timestamp_file_exists(exists=False)
def _image_aging_aged(self, aging_time=100):
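        # Mark the cached image for deletion, then advance the clock by ten
        # seconds and rerun the cache manager with the given aging threshold.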
self._override_time()
cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
self.flags(remove_unused_original_minimum_age_seconds=aging_time)
self._image_aging_image_marked_for_deletion()
all_instances = []
timeutils.set_time_override(cur_time)
self.conn.manage_image_cache(self.context, all_instances)
def test_image_aging_aged(self):
self._image_aging_aged(aging_time=8)
self._cached_files_exist(exists=False)
def test_image_aging_not_aged(self):
self._image_aging_aged()
self._cached_files_exist()
class VMwareAPIHostTestCase(test.NoDBTestCase):
"""Unit tests for Vmware API host calls."""
def setUp(self):
super(VMwareAPIHostTestCase, self).setUp()
self.flags(image_cache_subdirectory_name='vmware_base')
vm_util.vm_refs_cache_reset()
self.flags(host_ip='test_url',
host_username='test_username',
host_password='test_pass', group='vmware')
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
self.conn = driver.VMwareESXDriver(False)
def tearDown(self):
super(VMwareAPIHostTestCase, self).tearDown()
vmwareapi_fake.cleanup()
def test_host_state(self):
stats = self.conn.get_host_stats()
self.assertEqual(stats['vcpus'], 16)
self.assertEqual(stats['disk_total'], 1024)
self.assertEqual(stats['disk_available'], 500)
self.assertEqual(stats['disk_used'], 1024 - 500)
self.assertEqual(stats['host_memory_total'], 1024)
self.assertEqual(stats['host_memory_free'], 1024 - 500)
self.assertEqual(stats['hypervisor_version'], 5000000)
supported_instances = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
self.assertEqual(stats['supported_instances'], supported_instances)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self._test_host_action(self.conn.host_power_action, 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode, True)
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode, False)
def test_get_host_uptime(self):
result = self.conn.get_host_uptime('host')
self.assertEqual('Please refer to test_url for the uptime', result)
class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
def setUp(self):
super(VMwareAPIVCDriverTestCase, self).setUp()
cluster_name = 'test_cluster'
cluster_name2 = 'test_cluster2'
self.flags(cluster_name=[cluster_name, cluster_name2],
api_retry_count=1,
task_poll_interval=10, datastore_regex='.*', group='vmware')
self.flags(vnc_enabled=False,
image_cache_subdirectory_name='vmware_base')
vmwareapi_fake.reset(vc=True)
self.conn = driver.VMwareVCDriver(None, False)
self.node_name = self.conn._resources.keys()[0]
self.node_name2 = self.conn._resources.keys()[1]
if cluster_name2 in self.node_name2:
self.ds = 'ds1'
else:
self.ds = 'ds2'
self.vnc_host = 'ha-host'
def tearDown(self):
super(VMwareAPIVCDriverTestCase, self).tearDown()
vmwareapi_fake.cleanup()
def test_list_instances(self):
instances = self.conn.list_instances()
self.assertEqual(0, len(instances))
def test_list_instances_from_nodes(self):
# Create instance on node1
self._create_vm(self.node_name)
# Create instances on the other node
self._create_vm(self.node_name2, num_instances=2)
self._create_vm(self.node_name2, num_instances=3)
node1_vmops = self.conn._get_vmops_for_compute_node(self.node_name)
node2_vmops = self.conn._get_vmops_for_compute_node(self.node_name2)
self.assertEqual(1, len(node1_vmops.list_instances()))
self.assertEqual(2, len(node2_vmops.list_instances()))
self.assertEqual(3, len(self.conn.list_instances()))
def _setup_mocks_for_session(self, mock_init):
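        # Build a VC driver with __init__ mocked out and attach a mock
        # session so host init/cleanup can be exercised in isolation.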
mock_init.return_value = None
vcdriver = driver.VMwareVCDriver(None, False)
vcdriver._session = mock.Mock()
return vcdriver
@mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
def test_init_host_and_cleanup_host(self, mock_init):
vcdriver = self._setup_mocks_for_session(mock_init)
vcdriver.init_host("foo")
vcdriver._session._create_session.assert_called_once()
vcdriver.cleanup_host("foo")
vcdriver._session.vim.client.service.Logout.assert_called_once()
@mock.patch('nova.virt.vmwareapi.driver.LOG')
@mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
def test_cleanup_host_with_no_login(self, mock_init, mock_logger):
vcdriver = self._setup_mocks_for_session(mock_init)
vcdriver.init_host("foo")
vcdriver._session._create_session.assert_called_once()
        # Not logged in: Logout fails with a WebFault, which cleanup_host
        # must swallow rather than propagate.
mock_sc = mock.Mock()
vcdriver._session.vim.retrieve_service_content.return_value = mock_sc
web_fault = suds.WebFault(mock.Mock(), mock.Mock())
vcdriver._session.vim.client.service.Logout.side_effect = web_fault
vcdriver.cleanup_host("foo")
        # assert that the Logout call was attempted despite the fault
vcdriver._session.vim.client.service.Logout.assert_called_once()
mock_logger.debug.assert_called_once()
def test_host_power_action(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'host', 'action')
def test_host_maintenance_mode(self):
self.assertRaises(NotImplementedError,
self.conn.host_maintenance_mode, 'host', 'mode')
def test_set_host_enabled(self):
self.assertRaises(NotImplementedError,
self.conn.set_host_enabled, 'host', 'state')
def test_datastore_regex_configured(self):
for node in self.conn._resources.keys():
self.assertEqual(self.conn._datastore_regex,
self.conn._resources[node]['vmops']._datastore_regex)
def test_get_available_resource(self):
stats = self.conn.get_available_resource(self.node_name)
cpu_info = {"model": ["Intel(R) Xeon(R)", "Intel(R) Xeon(R)"],
"vendor": ["Intel", "Intel"],
"topology": {"cores": 16,
"threads": 32}}
self.assertEqual(stats['vcpus'], 32)
self.assertEqual(stats['local_gb'], 1024)
self.assertEqual(stats['local_gb_used'], 1024 - 500)
self.assertEqual(stats['memory_mb'], 1000)
self.assertEqual(stats['memory_mb_used'], 500)
self.assertEqual(stats['hypervisor_type'], 'VMware vCenter Server')
self.assertEqual(stats['hypervisor_version'], 5001000)
self.assertEqual(stats['hypervisor_hostname'], self.node_name)
self.assertEqual(stats['cpu_info'], jsonutils.dumps(cpu_info))
self.assertEqual(stats['supported_instances'],
'[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]')
def test_invalid_datastore_regex(self):
        # Tests that an exception is raised when the vmware datastore_regex
        # option is not a valid regular expression.
self.flags(cluster_name=['test_cluster'], datastore_regex='fake-ds(01',
group='vmware')
self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)
def test_get_available_nodes(self):
nodelist = self.conn.get_available_nodes()
self.assertEqual(len(nodelist), 2)
self.assertIn(self.node_name, nodelist)
self.assertIn(self.node_name2, nodelist)
def test_spawn_multiple_node(self):
def fake_is_neutron():
return False
self.stubs.Set(nova_utils, 'is_neutron', fake_is_neutron)
uuid1 = uuidutils.generate_uuid()
uuid2 = uuidutils.generate_uuid()
self._create_vm(node=self.node_name, num_instances=1,
uuid=uuid1)
info = self.conn.get_info({'uuid': uuid1,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.conn.destroy(self.context, self.instance, self.network_info)
self._create_vm(node=self.node_name2, num_instances=1,
uuid=uuid2)
info = self.conn.get_info({'uuid': uuid2,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_finish_migration_power_on(self):
self._test_finish_migration(power_on=True)
self.assertEqual(True, self.power_on_called)
def test_finish_migration_power_off(self):
self._test_finish_migration(power_on=False)
self.assertEqual(False, self.power_on_called)
def test_finish_migration_power_on_resize(self):
self._test_finish_migration(power_on=True,
resize_instance=True)
self.assertEqual(True, self.power_on_called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(power_on=True)
self.assertEqual(True, self.power_on_called)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(power_on=False)
self.assertEqual(False, self.power_on_called)
def test_snapshot(self):
# Ensure VMwareVCVMOps's get_copy_virtual_disk_spec is called twice
self.mox.StubOutWithMock(vmops.VMwareVCVMOps,
'get_copy_virtual_disk_spec')
self.conn._vmops.get_copy_virtual_disk_spec(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
self.conn._vmops.get_copy_virtual_disk_spec(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
self.mox.ReplayAll()
self._create_vm()
self._test_snapshot()
def test_snapshot_using_file_manager(self):
self._create_vm()
uuid_str = uuidutils.generate_uuid()
self.mox.StubOutWithMock(uuidutils,
'generate_uuid')
uuidutils.generate_uuid().AndReturn(uuid_str)
self.mox.StubOutWithMock(ds_util, 'file_delete')
# Check calls for delete vmdk and -flat.vmdk pair
ds_util.file_delete(mox.IgnoreArg(),
"[%s] vmware_temp/%s-flat.vmdk" % (self.ds, uuid_str),
mox.IgnoreArg()).AndReturn(None)
ds_util.file_delete(mox.IgnoreArg(),
"[%s] vmware_temp/%s.vmdk" % (self.ds, uuid_str),
mox.IgnoreArg()).AndReturn(None)
self.mox.ReplayAll()
self._test_snapshot()
def test_spawn_invalid_node(self):
self._create_instance(node='InvalidNodeName')
self.assertRaises(exception.NotFound, self.conn.spawn,
self.context, self.instance, self.image,
injected_files=[], admin_password=None,
network_info=self.network_info,
block_device_info=None)
def test_spawn_with_sparse_image(self):
# Only a sparse disk image triggers the copy
self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties')
result = [1024, {"vmware_ostype": "otherGuest",
"vmware_adaptertype": "lsiLogic",
"vmware_disktype": "sparse"}]
vmware_images.get_vmdk_size_and_properties(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(result)
# Ensure VMwareVCVMOps's get_copy_virtual_disk_spec is called twice
self.mox.StubOutWithMock(vmops.VMwareVCVMOps,
'get_copy_virtual_disk_spec')
self.conn._vmops.get_copy_virtual_disk_spec(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
self.conn._vmops.get_copy_virtual_disk_spec(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
self.mox.ReplayAll()
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
def test_plug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self._create_instance()
self.assertRaises(NotImplementedError,
self.conn.plug_vifs,
instance=self.instance, network_info=None)
def test_unplug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self._create_instance()
self.assertRaises(NotImplementedError,
self.conn.unplug_vifs,
instance=self.instance, network_info=None)
def test_migrate_disk_and_power_off(self):
def fake_update_instance_progress(context, instance, step,
total_steps):
pass
def fake_get_host_ref_from_name(dest):
return None
self._create_vm()
vm_ref_orig = vm_util.get_vm_ref(self.conn._session, self.instance)
flavor = {'name': 'fake', 'flavorid': 'fake_id'}
self.stubs.Set(self.conn._vmops, "_update_instance_progress",
fake_update_instance_progress)
self.stubs.Set(self.conn._vmops, "_get_host_ref_from_name",
fake_get_host_ref_from_name)
self.conn.migrate_disk_and_power_off(self.context, self.instance,
'fake_dest', flavor,
None)
vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
self.assertNotEqual(vm_ref_orig.value, vm_ref.value,
"These should be different")
def test_disassociate_vmref_from_instance(self):
self._create_vm()
vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
vm_util.disassociate_vmref_from_instance(self.conn._session,
self.instance, vm_ref, "-backup")
self.assertRaises(exception.InstanceNotFound,
vm_util.get_vm_ref, self.conn._session, self.instance)
def test_clone_vmref_for_instance(self):
self._create_vm()
vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
vm_util.disassociate_vmref_from_instance(self.conn._session,
self.instance, vm_ref, "-backup")
host_ref = vmwareapi_fake._get_object_refs("HostSystem")[0]
ds_ref = vmwareapi_fake._get_object_refs("Datastore")[0]
dc_obj = vmwareapi_fake._get_objects("Datacenter").objects[0]
vm_util.clone_vmref_for_instance(self.conn._session, self.instance,
vm_ref, host_ref, ds_ref,
dc_obj.get("vmFolder"))
self.assertIsNotNone(
vm_util.get_vm_ref(self.conn._session, self.instance),
"No VM found")
cloned_vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
self.assertNotEqual(vm_ref.value, cloned_vm_ref.value,
"Reference for the cloned VM should be different")
vm_obj = vmwareapi_fake._get_vm_mdo(vm_ref)
cloned_vm_obj = vmwareapi_fake._get_vm_mdo(cloned_vm_ref)
self.assertEqual(vm_obj.name, self.instance['uuid'] + "-backup",
"Original VM name should be with suffix -backup")
self.assertEqual(cloned_vm_obj.name, self.instance['uuid'],
"VM name does not match instance['uuid']")
self.assertRaises(error_util.MissingParameter,
vm_util.clone_vmref_for_instance, self.conn._session,
self.instance, None, host_ref, ds_ref,
dc_obj.get("vmFolder"))
def test_associate_vmref_for_instance(self):
self._create_vm()
vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
# First disassociate the VM from the instance so that we have a VM
# to later associate using the associate_vmref_for_instance method
vm_util.disassociate_vmref_from_instance(self.conn._session,
self.instance, vm_ref, "-backup")
# Ensure that the VM is indeed disassociated and that we cannot find
# the VM using the get_vm_ref method
self.assertRaises(exception.InstanceNotFound,
vm_util.get_vm_ref, self.conn._session, self.instance)
# Associate the VM back to the instance
vm_util.associate_vmref_for_instance(self.conn._session, self.instance,
suffix="-backup")
# Verify if we can get the VM reference
self.assertIsNotNone(
vm_util.get_vm_ref(self.conn._session, self.instance),
"No VM found")
def test_confirm_migration(self):
self._create_vm()
self.conn.confirm_migration(self.context, self.instance, None)
def test_spawn_attach_volume_vmdk(self):
self._spawn_attach_volume_vmdk(vc_support=True)
def test_spawn_attach_volume_vmdk_no_image_ref(self):
self._spawn_attach_volume_vmdk(set_image_ref=False, vc_support=True)
def test_pause(self):
# Tests that the VMwareVCDriver does not implement the pause method.
self._create_instance()
self.assertRaises(NotImplementedError, self.conn.pause, self.instance)
def test_unpause(self):
# Tests that the VMwareVCDriver does not implement the unpause method.
self._create_instance()
self.assertRaises(NotImplementedError, self.conn.unpause,
self.instance)
def test_datastore_dc_map(self):
vmops = self.conn._resources[self.node_name]['vmops']
self.assertEqual({}, vmops._datastore_dc_mapping)
self._create_vm()
# currently there are 2 data stores
self.assertEqual(2, len(vmops._datastore_dc_mapping))
def test_rollback_live_migration_at_destination(self):
with mock.patch.object(self.conn, "destroy") as mock_destroy:
self.conn.rollback_live_migration_at_destination(self.context,
"instance", [], None)
mock_destroy.assert_called_once_with(self.context,
"instance", [], None)
|
GHSA-jv34-xvjq-ppch
|
nova/virt/vmwareapi/vmops.py
|
@@ -29,6 +29,7 @@
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
+from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.network import model as network_model
@@ -966,13 +967,9 @@ def reboot(self, instance, network_info):
self._session._wait_for_task(reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
- def destroy(self, instance, network_info, destroy_disks=True,
- instance_name=None):
- """Destroy a VM instance. Steps followed are:
- 1. Power off the VM, if it is in poweredOn state.
- 2. Un-register a VM.
- 3. Delete the contents of the folder holding the VM related data.
- """
+ def _destroy_instance(self, instance, network_info, destroy_disks=True,
+ instance_name=None):
+ # Destroy a VM instance
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when the spawn of a rescue instance takes place.
if not instance_name:
@@ -1010,8 +1007,9 @@ def destroy(self, instance, network_info, destroy_disks=True,
"UnregisterVM", vm_ref)
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception as excep:
- LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
- " while un-registering the VM: %s") % str(excep))
+ LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this "
+ "exception while un-registering the VM: %s"),
+ excep)
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks and datastore_name:
@@ -1034,15 +1032,39 @@ def destroy(self, instance, network_info, destroy_disks=True,
{'datastore_name': datastore_name},
instance=instance)
except Exception as excep:
- LOG.warn(_("In vmwareapi:vmops:destroy, "
- "got this exception while deleting"
- " the VM contents from the disk: %s")
- % str(excep))
+ LOG.warn(_("In vmwareapi:vmops:_destroy_instance, "
+ "got this exception while deleting "
+ "the VM contents from the disk: %s"),
+ excep)
except Exception as exc:
LOG.exception(exc, instance=instance)
finally:
vm_util.vm_ref_cache_delete(instance_name)
+ def destroy(self, instance, network_info, destroy_disks=True):
+ """Destroy a VM instance.
+
+ Steps followed for each VM are:
+ 1. Power off, if it is in poweredOn state.
+ 2. Un-register.
+ 3. Delete the contents of the folder holding the VM related data.
+ """
+ # If there is a rescue VM then we need to destroy that one too.
+ LOG.debug(_("Destroying instance"), instance=instance)
+ if instance['vm_state'] == vm_states.RESCUED:
+ LOG.debug(_("Rescue VM configured"), instance=instance)
+ try:
+ self.unrescue(instance, power_on=False)
+ LOG.debug(_("Rescue VM destroyed"), instance=instance)
+ except Exception:
+ rescue_name = instance['uuid'] + self._rescue_suffix
+ self._destroy_instance(instance, network_info,
+ destroy_disks=destroy_disks,
+ instance_name=rescue_name)
+ self._destroy_instance(instance, network_info,
+ destroy_disks=destroy_disks)
+ LOG.debug(_("Instance destroyed"), instance=instance)
+
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
@@ -1120,7 +1142,7 @@ def rescue(self, context, instance, network_info, image_meta):
adapter_type, disk_type, vmdk_path)
self._power_on(instance, vm_ref=rescue_vm_ref)
- def unrescue(self, instance):
+ def unrescue(self, instance, power_on=True):
"""Unrescue the specified instance."""
# Get the original vmdk_path
vm_ref = vm_util.get_vm_ref(self._session, instance)
@@ -1142,8 +1164,9 @@ def unrescue(self, instance):
device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path)
self._power_off_vm_ref(vm_rescue_ref)
self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device)
- self.destroy(r_instance, None, instance_name=instance_name)
- self._power_on(instance)
+ self._destroy_instance(r_instance, None, instance_name=instance_name)
+ if power_on:
+ self._power_on(instance)
def _power_off_vm_ref(self, vm_ref):
"""Power off the specifed vm.
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""
import collections
import copy
import os
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
from nova import context as nova_context
from nova import exception
from nova.network import model as network_model
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmware_images
vmware_vif_opts = [
cfg.StrOpt('integration_bridge',
default='br-int',
help='Name of Integration Bridge'),
]
vmware_group = cfg.OptGroup(name='vmware',
title='VMware Options')
CONF = cfg.CONF
CONF.register_group(vmware_group)
CONF.register_opts(vmware_vif_opts, vmware_group)
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('remove_unused_base_images', 'nova.virt.imagecache')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
'suspended': power_state.SUSPENDED}
VMWARE_LINKED_CLONE = 'vmware_linked_clone'
RESIZE_TOTAL_STEPS = 4
DcInfo = collections.namedtuple('DcInfo',
['ref', 'name', 'vmFolder'])
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session, virtapi, volumeops, cluster=None,
datastore_regex=None):
"""Initializer."""
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops
self._cluster = cluster
self._datastore_regex = datastore_regex
# Ensure that the base folder is unique per compute node
if CONF.remove_unused_base_images:
self._base_folder = '%s%s' % (CONF.my_ip,
CONF.image_cache_subdirectory_name)
else:
# Disabling aging ensures backward compatibility
self._base_folder = CONF.image_cache_subdirectory_name
self._tmp_folder = 'vmware_temp'
self._default_root_device = 'vda'
self._rescue_suffix = '-rescue'
self._migrate_suffix = '-orig'
self._poll_rescue_last_ran = None
self._is_neutron = utils.is_neutron()
self._datastore_dc_mapping = {}
self._datastore_browser_mapping = {}
self._imagecache = imagecache.ImageCacheManager(self._session,
self._base_folder)
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
LOG.debug(_("Getting list of instances"))
vms = self._session._call_method(vim_util, "get_objects",
"VirtualMachine",
["name", "runtime.connectionState"])
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
service_content = self._session._get_vim().get_service_content()
LOG.debug(_("Extending root virtual disk to %s"), requested_size)
vmdk_extend_task = self._session._call_method(
self._session._get_vim(),
"ExtendVirtualDisk_Task",
service_content.virtualDiskManager,
name=name,
datacenter=dc_ref,
newCapacityKb=requested_size,
eagerZero=False)
try:
self._session._wait_for_task(vmdk_extend_task)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Extending virtual disk failed with error: %s'),
e, instance=instance)
# Clean up files created during the extend operation
files = [name.replace(".vmdk", "-flat.vmdk"), name]
for file in files:
self._delete_datastore_file(instance, file, dc_ref)
LOG.debug(_("Extended root virtual disk"))
def _delete_datastore_file(self, instance, datastore_path, dc_ref):
try:
ds_util.file_delete(self._session, datastore_path, dc_ref)
except (error_util.CannotDeleteFileException,
error_util.FileFaultException,
error_util.FileLockedException,
error_util.FileNotFoundException) as e:
LOG.debug(_("Unable to delete %(ds)s. There may be more than "
"one process or thread that tries to delete the file. "
"Exception: %(ex)s"),
{'ds': datastore_path, 'ex': e})
def _get_vmdk_path(self, ds_name, folder, name):
path = "%s/%s.vmdk" % (folder, name)
return ds_util.build_datastore_path(ds_name, path)
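# Hypothetical example of the value built above, assuming
# ds_util.build_datastore_path renders paths as "[<datastore>] <path>":
#   _get_vmdk_path("ds1", "inst-1", "inst-1") -> "[ds1] inst-1/inst-1.vmdk"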
def _get_disk_format(self, image_meta):
disk_format = image_meta.get('disk_format')
if disk_format not in ['iso', 'vmdk', None]:
raise exception.InvalidDiskFormat(disk_format=disk_format)
return (disk_format, disk_format == 'iso')
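# Sketch of the behaviour above (derived from the checks, not new logic):
#   {'disk_format': 'iso'}   -> ('iso', True)
#   {'disk_format': 'vmdk'}  -> ('vmdk', False)
#   {'disk_format': 'qcow2'} -> raises exception.InvalidDiskFormat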
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None,
instance_name=None, power_on=True):
"""Creates a VM instance.
Steps followed are:
1. Create a VM with no disk and the specifics in the instance object
like RAM size.
2. For flat disk
2.1. Create a dummy vmdk of the size of the disk file that is to be
uploaded. This is required just to create the metadata file.
2.2. Delete the -flat.vmdk file created in the above step and retain
the metadata .vmdk file.
2.3. Upload the disk file.
3. For sparse disk
3.1. Upload the disk file to a -sparse.vmdk file.
3.2. Copy/Clone the -sparse.vmdk file to a thin vmdk.
3.3. Delete the -sparse.vmdk file.
4. Attach the disk to the VM by reconfiguring the same.
5. Power on the VM.
"""
ebs_root = False
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if block_device_mapping:
ebs_root = True
(file_type, is_iso) = self._get_disk_format(image_meta)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
ds = vm_util.get_datastore_ref_and_name(self._session, self._cluster,
datastore_regex=self._datastore_regex)
data_store_ref = ds[0]
data_store_name = ds[1]
dc_info = self.get_datacenter_ref_and_name(data_store_ref)
#TODO(hartsocks): this pattern is confusing, reimplement as methods
# The use of nested functions in this file makes for a confusing and
# hard to maintain file. At some future date, refactor this method to
# be a full-fledged method. This will also make unit testing easier.
def _get_image_properties(root_size):
"""Get the Size of the flat vmdk file that is there on the storage
repository.
"""
image_ref = instance.get('image_ref')
if image_ref:
_image_info = vmware_images.get_vmdk_size_and_properties(
context, image_ref, instance)
else:
# The case that the image may be booted from a volume
_image_info = (root_size, {})
image_size, image_properties = _image_info
vmdk_file_size_in_kb = int(image_size) / 1024
os_type = image_properties.get("vmware_ostype", "otherGuest")
adapter_type = image_properties.get("vmware_adaptertype",
"lsiLogic")
disk_type = image_properties.get("vmware_disktype",
"preallocated")
# Get the network card type from the image properties.
vif_model = image_properties.get("hw_vif_model",
network_model.VIF_MODEL_E1000)
# Fetch the image_linked_clone data here. It is retrieved
# with the above network based API call. To retrieve it
# later will necessitate additional network calls using the
# identical method. Consider this a cache.
image_linked_clone = image_properties.get(VMWARE_LINKED_CLONE)
return (vmdk_file_size_in_kb, os_type, adapter_type, disk_type,
vif_model, image_linked_clone)
root_gb = instance['root_gb']
root_gb_in_kb = root_gb * units.Mi
(vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model,
image_linked_clone) = _get_image_properties(root_gb_in_kb)
if root_gb_in_kb and vmdk_file_size_in_kb > root_gb_in_kb:
reason = _("Image disk size greater than requested disk size")
raise exception.InstanceUnacceptable(instance_id=instance['uuid'],
reason=reason)
node_mo_id = vm_util.get_mo_id_from_instance(instance)
res_pool_ref = vm_util.get_res_pool_ref(self._session,
self._cluster, node_mo_id)
def _get_vif_infos():
vif_infos = []
if network_info is None:
return vif_infos
for vif in network_info:
mac_address = vif['address']
network_name = vif['network']['bridge'] or \
CONF.vmware.integration_bridge
network_ref = vmwarevif.get_network_ref(self._session,
self._cluster,
vif,
self._is_neutron)
vif_infos.append({'network_name': network_name,
'mac_address': mac_address,
'network_ref': network_ref,
'iface_id': vif['id'],
'vif_model': vif_model
})
return vif_infos
vif_infos = _get_vif_infos()
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when the spawn of a rescue instance takes place.
if not instance_name:
instance_name = instance['uuid']
# Get the create vm config spec
config_spec = vm_util.get_vm_create_spec(
client_factory, instance, instance_name,
data_store_name, vif_infos, os_type)
def _execute_create_vm():
"""Create VM on ESX host."""
LOG.debug(_("Creating VM on the ESX host"), instance=instance)
# Create the VM on the ESX host
vm_create_task = self._session._call_method(
self._session._get_vim(),
"CreateVM_Task", dc_info.vmFolder,
config=config_spec, pool=res_pool_ref)
self._session._wait_for_task(vm_create_task)
LOG.debug(_("Created VM on the ESX host"), instance=instance)
_execute_create_vm()
# In the case of a rescue disk the instance_name is not the same as
# instance UUID. In this case the VM reference is accessed via the
# instance name.
if instance_name != instance['uuid']:
vm_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
else:
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info)
# Set the vnc configuration of the instance, vnc port starts from 5900
if CONF.vnc_enabled:
vnc_port = vm_util.get_vnc_port(self._session)
self._set_vnc_config(client_factory, instance, vnc_port)
def _create_virtual_disk(virtual_disk_path, file_size_in_kb):
"""Create a virtual disk of the size of flat vmdk file."""
# Create a Virtual Disk of the size of the flat vmdk file. This is
# done just to generate the meta-data file whose specifics
# depend on the size of the disk, thin/thick provisioning and the
# storage adapter type.
# Here we assume thick provisioning and lsiLogic for the adapter
# type
LOG.debug(_("Creating Virtual Disk of size "
"%(vmdk_file_size_in_kb)s KB and adapter type "
"%(adapter_type)s on the ESX host local store "
"%(data_store_name)s"),
{"vmdk_file_size_in_kb": file_size_in_kb,
"adapter_type": adapter_type,
"data_store_name": data_store_name},
instance=instance)
vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
file_size_in_kb, adapter_type,
disk_type)
vmdk_create_task = self._session._call_method(
self._session._get_vim(),
"CreateVirtualDisk_Task",
service_content.virtualDiskManager,
name=virtual_disk_path,
datacenter=dc_info.ref,
spec=vmdk_create_spec)
self._session._wait_for_task(vmdk_create_task)
LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
" KB and type %(disk_type)s on "
"the ESX host local store %(data_store_name)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
"disk_type": disk_type,
"data_store_name": data_store_name},
instance=instance)
def _fetch_image_on_datastore(upload_name):
"""Fetch image from Glance to datastore."""
LOG.debug(_("Downloading image file data %(image_ref)s to the "
"data store %(data_store_name)s") %
{'image_ref': instance['image_ref'],
'data_store_name': data_store_name},
instance=instance)
vmware_images.fetch_image(
context,
instance['image_ref'],
instance,
host=self._session._host_ip,
data_center_name=dc_info.name,
datastore_name=data_store_name,
cookies=cookies,
file_path=upload_name)
LOG.debug(_("Downloaded image file data %(image_ref)s to "
"%(upload_name)s on the data store "
"%(data_store_name)s") %
{'image_ref': instance['image_ref'],
'upload_name': upload_name,
'data_store_name': data_store_name},
instance=instance)
def _copy_virtual_disk(source, dest):
"""Copy a sparse virtual disk to a thin virtual disk."""
# Copy a sparse virtual disk to a thin virtual disk. This is also
# done to generate the meta-data file whose specifics
# depend on the size of the disk, thin/thick provisioning and the
# storage adapter type.
LOG.debug(_("Copying Virtual Disk of size "
"%(vmdk_file_size_in_kb)s KB and adapter type "
"%(adapter_type)s on the ESX host local store "
"%(data_store_name)s to disk type %(disk_type)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
"adapter_type": adapter_type,
"data_store_name": data_store_name,
"disk_type": disk_type},
instance=instance)
vmdk_copy_spec = self.get_copy_virtual_disk_spec(client_factory,
adapter_type,
disk_type)
vmdk_copy_task = self._session._call_method(
self._session._get_vim(),
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=source,
sourceDatacenter=dc_info.ref,
destName=dest,
destSpec=vmdk_copy_spec)
self._session._wait_for_task(vmdk_copy_task)
LOG.debug(_("Copied Virtual Disk of size %(vmdk_file_size_in_kb)s"
" KB and type %(disk_type)s on "
"the ESX host local store %(data_store_name)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
"disk_type": disk_type,
"data_store_name": data_store_name},
instance=instance)
if not ebs_root:
# this logic allows for instances or images to decide
# for themselves which strategy is best for them.
linked_clone = VMwareVMOps.decide_linked_clone(
image_linked_clone,
CONF.vmware.use_linked_clone
)
upload_name = instance['image_ref']
upload_folder = '%s/%s' % (self._base_folder, upload_name)
# The vmdk meta-data file
uploaded_file_name = "%s/%s.%s" % (upload_folder, upload_name,
file_type)
uploaded_file_path = ds_util.build_datastore_path(data_store_name,
uploaded_file_name)
session_vim = self._session._get_vim()
cookies = session_vim.client.options.transport.cookiejar
ds_browser = self._get_ds_browser(data_store_ref)
upload_file_name = upload_name + ".%s" % file_type
# Check if the timestamp file exists - if so then delete it. This
# will ensure that the aging will not delete a cache image if it
# is going to be used now.
if CONF.remove_unused_base_images:
ds_path = ds_util.build_datastore_path(data_store_name,
self._base_folder)
path = self._imagecache.timestamp_folder_get(ds_path,
upload_name)
# Lock to ensure that the spawn will not try to access an image
# that is currently being deleted on the datastore.
with lockutils.lock(path, lock_file_prefix='nova-vmware-ts',
external=True):
self._imagecache.timestamp_cleanup(dc_info.ref, ds_browser,
data_store_ref, data_store_name, path)
# Check if the image exists in the datastore cache. If not the
# image will be uploaded and cached.
if not (self._check_if_folder_file_exists(ds_browser,
data_store_ref, data_store_name,
upload_folder, upload_file_name)):
# Upload will be done to the self._tmp_folder and then moved
# to the self._base_folder
tmp_upload_folder = '%s/%s' % (self._tmp_folder,
uuidutils.generate_uuid())
upload_folder = '%s/%s' % (tmp_upload_folder, upload_name)
# Naming the VM files in correspondence with the VM instance
# The flat vmdk file name
flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (
upload_folder, upload_name)
# The sparse vmdk file name for sparse disk image
sparse_uploaded_vmdk_name = "%s/%s-sparse.vmdk" % (
upload_folder, upload_name)
flat_uploaded_vmdk_path = ds_util.build_datastore_path(
data_store_name,
flat_uploaded_vmdk_name)
sparse_uploaded_vmdk_path = ds_util.build_datastore_path(
data_store_name,
sparse_uploaded_vmdk_name)
upload_file_name = "%s/%s.%s" % (upload_folder, upload_name,
file_type)
upload_path = ds_util.build_datastore_path(data_store_name,
upload_file_name)
if not is_iso:
if disk_type != "sparse":
# Create a flat virtual disk and retain the metadata
# file. This will be done in the unique temporary
# directory.
ds_util.mkdir(self._session,
ds_util.build_datastore_path(
data_store_name, upload_folder),
dc_info.ref)
_create_virtual_disk(upload_path,
vmdk_file_size_in_kb)
self._delete_datastore_file(instance,
flat_uploaded_vmdk_path,
dc_info.ref)
upload_file_name = flat_uploaded_vmdk_name
else:
upload_file_name = sparse_uploaded_vmdk_name
_fetch_image_on_datastore(upload_file_name)
if not is_iso and disk_type == "sparse":
# Copy the sparse virtual disk to a thin virtual disk.
disk_type = "thin"
_copy_virtual_disk(sparse_uploaded_vmdk_path, upload_path)
self._delete_datastore_file(instance,
sparse_uploaded_vmdk_path,
dc_info.ref)
base_folder = '%s/%s' % (self._base_folder, upload_name)
dest_folder = ds_util.build_datastore_path(data_store_name,
base_folder)
src_folder = ds_util.build_datastore_path(data_store_name,
upload_folder)
try:
ds_util.file_move(self._session, dc_info.ref,
src_folder, dest_folder)
except error_util.FileAlreadyExistsException:
# File move has failed. This may be due to the fact that a
# process or thread has already completed the operation.
# In the event of a FileAlreadyExists we continue,
# all other exceptions will be raised.
LOG.debug(_("File %s already exists"), dest_folder)
# Delete the temp upload folder
self._delete_datastore_file(instance,
ds_util.build_datastore_path(data_store_name,
tmp_upload_folder),
dc_info.ref)
else:
# linked clone base disk exists
if disk_type == "sparse":
disk_type = "thin"
if is_iso:
if root_gb_in_kb:
dest_vmdk_path = self._get_vmdk_path(data_store_name,
instance['uuid'], instance_name)
# Create the blank virtual disk for the VM
_create_virtual_disk(dest_vmdk_path, root_gb_in_kb)
root_vmdk_path = dest_vmdk_path
else:
root_vmdk_path = None
else:
# Extend the disk size if necessary
if not linked_clone:
# If we are not using linked_clone, copy the image from
# the cache into the instance directory. If we are using
# linked clone it is referenced from the cache directory
dest_vmdk_path = self._get_vmdk_path(data_store_name,
instance_name, instance_name)
_copy_virtual_disk(uploaded_file_path, dest_vmdk_path)
root_vmdk_path = dest_vmdk_path
if root_gb_in_kb > vmdk_file_size_in_kb:
self._extend_virtual_disk(instance, root_gb_in_kb,
root_vmdk_path, dc_info.ref)
else:
upload_folder = '%s/%s' % (self._base_folder, upload_name)
root_vmdk_name = "%s/%s.%s.vmdk" % (upload_folder,
upload_name,
root_gb)
root_vmdk_path = ds_util.build_datastore_path(
data_store_name, root_vmdk_name)
if not self._check_if_folder_file_exists(ds_browser,
data_store_ref, data_store_name,
upload_folder,
upload_name + ".%s.vmdk" % root_gb):
LOG.debug(_("Copying root disk of size %sGb"), root_gb)
try:
_copy_virtual_disk(uploaded_file_path,
root_vmdk_path)
except Exception as e:
LOG.warning(_("Root disk file creation "
"failed - %s"), e)
if root_gb_in_kb > vmdk_file_size_in_kb:
self._extend_virtual_disk(instance, root_gb_in_kb,
root_vmdk_path,
dc_info.ref)
# Attach the root disk to the VM.
if root_vmdk_path:
self._volumeops.attach_disk_to_vm(
vm_ref, instance,
adapter_type, disk_type, root_vmdk_path,
root_gb_in_kb, linked_clone)
if is_iso:
self._attach_cdrom_to_vm(
vm_ref, instance,
data_store_ref,
uploaded_file_path)
if configdrive.required_by(instance):
uploaded_iso_path = self._create_config_drive(instance,
injected_files,
admin_password,
data_store_name,
dc_info.name,
instance['uuid'],
cookies)
uploaded_iso_path = ds_util.build_datastore_path(
data_store_name,
uploaded_iso_path)
self._attach_cdrom_to_vm(
vm_ref, instance,
data_store_ref,
uploaded_iso_path)
else:
# Attach the root disk to the VM.
for root_disk in block_device_mapping:
connection_info = root_disk['connection_info']
self._volumeops.attach_root_volume(connection_info, instance,
self._default_root_device,
data_store_ref)
def _power_on_vm():
"""Power on the VM."""
LOG.debug(_("Powering on the VM instance"), instance=instance)
# Power On the VM
power_on_task = self._session._call_method(
self._session._get_vim(),
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(power_on_task)
LOG.debug(_("Powered on the VM instance"), instance=instance)
if power_on:
_power_on_vm()
def _create_config_drive(self, instance, injected_files, admin_password,
data_store_name, dc_name, upload_folder, cookies):
if CONF.config_drive_format != 'iso9660':
reason = (_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
raise exception.InstancePowerOnFailure(reason=reason)
LOG.info(_('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
try:
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive.iso')
cdb.make_drive(tmp_file)
upload_iso_path = "%s/configdrive.iso" % (
upload_folder)
vmware_images.upload_iso_to_datastore(
tmp_file, instance,
host=self._session._host_ip,
data_center_name=dc_name,
datastore_name=data_store_name,
cookies=cookies,
file_path=upload_iso_path)
return upload_iso_path
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed with error: %s'),
e, instance=instance)
def _attach_cdrom_to_vm(self, vm_ref, instance,
datastore, file_path):
"""Attach cdrom to VM by reconfiguration."""
instance_name = instance['name']
instance_uuid = instance['uuid']
client_factory = self._session._get_vim().client.factory
devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
client_factory,
devices,
'ide')
cdrom_attach_config_spec = vm_util.get_cdrom_attach_config_spec(
client_factory, datastore, file_path,
controller_key, unit_number)
if controller_spec:
cdrom_attach_config_spec.deviceChange.append(controller_spec)
LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach "
"cdrom %(file_path)s"),
{'instance_name': instance_name, 'file_path': file_path})
reconfig_task = self._session._call_method(
self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=cdrom_attach_config_spec)
self._session._wait_for_task(reconfig_task)
LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach "
"cdrom %(file_path)s"),
{'instance_name': instance_name, 'file_path': file_path})
@staticmethod
def decide_linked_clone(image_linked_clone, global_linked_clone):
"""Explicit decision logic: whether to use linked clone on a vmdk.
This is *override* logic not boolean logic.
1. let the image over-ride if set at all
2. default to the global setting
In math terms, I need to allow:
glance image to override global config.
That is g vs c. "g" for glance. "c" for Config.
So, I need g=True vs c=False to be True.
And, I need g=False vs c=True to be False.
And, I need g=None vs c=True to be True.
Some images may independently be best tuned for use_linked_clone=True,
saving datastore space. Alternatively, a whole OpenStack install may
be tuned for performance with use_linked_clone=False, but a single image
in this environment may be best configured to save storage space and
set use_linked_clone=True only for itself.
The point is: let each layer of control override the layer beneath it.
rationale:
For technical discussion on the clone strategies and their trade-offs
see: https://www.vmware.com/support/ws5/doc/ws_clone_typeofclone.html
:param image_linked_clone: boolean or string or None
:param global_linked_clone: boolean or string or None
:return: Boolean
"""
value = None
# Consider the values in order of override.
if image_linked_clone is not None:
value = image_linked_clone
else:
# this will never be unset at this point.
value = global_linked_clone
return strutils.bool_from_string(value)
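# Illustrative sketch of the override semantics above, assuming
# strutils.bool_from_string coerces booleans and strings as usual:
#   decide_linked_clone(None, True)    -> True   (fall back to the config)
#   decide_linked_clone(False, True)   -> False  (image overrides the config)
#   decide_linked_clone('true', False) -> True   (image overrides the config)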
def get_copy_virtual_disk_spec(self, client_factory, adapter_type,
disk_type):
return vm_util.get_copy_virtual_disk_spec(client_factory,
adapter_type,
disk_type)
def _create_vm_snapshot(self, instance, vm_ref):
LOG.debug(_("Creating Snapshot of the VM instance"), instance=instance)
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance['uuid'],
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
self._session._wait_for_task(snapshot_task)
LOG.debug(_("Created Snapshot of the VM instance"), instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
snapshot_task, "Task", "info")
snapshot = task_info.result
return snapshot
def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
LOG.debug(_("Deleting Snapshot of the VM instance"), instance=instance)
delete_snapshot_task = self._session._call_method(
self._session._get_vim(),
"RemoveSnapshot_Task", snapshot,
removeChildren=False, consolidate=True)
self._session._wait_for_task(delete_snapshot_task)
LOG.debug(_("Deleted Snapshot of the VM instance"), instance=instance)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
Steps followed are:
1. Get the name of the vmdk file which the VM points to right now.
Can be a chain of snapshots, so we need to know the last in the
chain.
2. Create the snapshot. A new vmdk is created which the VM points to
now. The earlier vmdk becomes read-only.
3. Call CopyVirtualDisk which coalesces the disk chain to form a single
vmdk, or rather a .vmdk metadata file and a -flat.vmdk disk data file.
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
def _get_vm_and_vmdk_attribs():
# Get the vmdk file name that the VM is pointing to
hw_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_file_path_before_snapshot, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hw_devices, uuid=instance['uuid'])
datastore_name = ds_util.split_datastore_path(
vmdk_file_path_before_snapshot)[0]
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
return (vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type)
(vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type) = _get_vm_and_vmdk_attribs()
snapshot = self._create_vm_snapshot(instance, vm_ref)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
def _check_if_tmp_folder_exists():
# Copy the contents of the VM that were there just before the
# snapshot was taken
ds_ref_ret = self._session._call_method(
vim_util, "get_dynamic_property", vm_ref, "VirtualMachine",
"datastore")
if ds_ref_ret is None:
raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
self.check_temp_folder(datastore_name, ds_ref)
return ds_ref
ds_ref = _check_if_tmp_folder_exists()
# Generate a random vmdk file name to which the coalesced vmdk content
# will be copied. A random name is chosen so that we don't have
# name clashes.
random_name = uuidutils.generate_uuid()
dest_vmdk_file_path = ds_util.build_datastore_path(datastore_name,
"%s/%s.vmdk" % (self._tmp_folder, random_name))
dest_vmdk_data_file_path = ds_util.build_datastore_path(datastore_name,
"%s/%s-flat.vmdk" % (self._tmp_folder, random_name))
dc_info = self.get_datacenter_ref_and_name(ds_ref)
def _copy_vmdk_content():
# Consolidate the snapshotted disk to a temporary vmdk.
copy_spec = self.get_copy_virtual_disk_spec(client_factory,
adapter_type,
disk_type)
LOG.debug(_('Copying snapshotted disk %s.'),
vmdk_file_path_before_snapshot,
instance=instance)
copy_disk_task = self._session._call_method(
self._session._get_vim(),
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_info.ref,
destName=dest_vmdk_file_path,
destDatacenter=dc_info.ref,
destSpec=copy_spec,
force=False)
self._session._wait_for_task(copy_disk_task)
LOG.debug(_('Copied snapshotted disk %s.'),
vmdk_file_path_before_snapshot,
instance=instance)
_copy_vmdk_content()
# Note(vui): handle snapshot cleanup on exceptions.
self._delete_vm_snapshot(instance, vm_ref, snapshot)
cookies = self._session._get_vim().client.options.transport.cookiejar
def _upload_vmdk_to_image_repository():
# Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug(_("Uploading image %s") % image_id,
instance=instance)
vmware_images.upload_image(
context,
image_id,
instance,
os_type=os_type,
disk_type="preallocated",
adapter_type=adapter_type,
image_version=1,
host=self._session._host_ip,
data_center_name=dc_info.name,
datastore_name=datastore_name,
cookies=cookies,
file_path="%s/%s-flat.vmdk" % (self._tmp_folder, random_name))
LOG.debug(_("Uploaded image %s") % image_id,
instance=instance)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
_upload_vmdk_to_image_repository()
def _clean_temp_data():
"""Delete temporary vmdk files generated in image handling
operations.
"""
# The data file is the one occupying space, and likelier to see
# deletion problems, so prioritize its deletion first. In the
# unlikely event that its deletion fails, the small descriptor file
# is retained too by design since it makes little sense to remove
# it when the data disk it refers to still lingers.
for f in dest_vmdk_data_file_path, dest_vmdk_file_path:
self._delete_datastore_file(instance, f, dc_info.ref)
_clean_temp_data()
def _get_values_from_object_properties(self, props, query):
while props:
token = vm_util._get_token(props)
for elem in props.objects:
for prop in elem.propSet:
for key in query.keys():
if prop.name == key:
query[key] = prop.val
break
if token:
props = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
query = {'runtime.powerState': None,
'summary.guest.toolsStatus': None,
'summary.guest.toolsRunningStatus': False}
self._get_values_from_object_properties(props, query)
pwr_state = query['runtime.powerState']
tools_status = query['summary.guest.toolsStatus']
tools_running_status = query['summary.guest.toolsRunningStatus']
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
reason = _("instance is not powered on")
raise exception.InstanceRebootFailure(reason=reason)
# If the latest VMware tools are installed in the VM and the tools are
# running, only do a guest reboot. Otherwise do a hard reset.
if (tools_status == "toolsOk" and
tools_running_status == "guestToolsRunning"):
LOG.debug(_("Rebooting guest OS of VM"), instance=instance)
self._session._call_method(self._session._get_vim(), "RebootGuest",
vm_ref)
LOG.debug(_("Rebooted guest OS of VM"), instance=instance)
else:
LOG.debug(_("Doing hard reboot of VM"), instance=instance)
reset_task = self._session._call_method(self._session._get_vim(),
"ResetVM_Task", vm_ref)
self._session._wait_for_task(reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
def destroy(self, instance, network_info, destroy_disks=True,
instance_name=None):
"""Destroy a VM instance. Steps followed are:
1. Power off the VM, if it is in poweredOn state.
2. Un-register a VM.
3. Delete the contents of the folder holding the VM related data.
"""
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when the spawn of a rescue instance takes place.
if not instance_name:
instance_name = instance['uuid']
try:
vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
lst_properties = ["config.files.vmPathName", "runtime.powerState",
"datastore"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
query = {'runtime.powerState': None,
'config.files.vmPathName': None,
'datastore': None}
self._get_values_from_object_properties(props, query)
pwr_state = query['runtime.powerState']
vm_config_pathname = query['config.files.vmPathName']
datastore_name = None
if vm_config_pathname:
_ds_path = ds_util.split_datastore_path(vm_config_pathname)
datastore_name, vmx_file_path = _ds_path
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
LOG.debug(_("Powering off the VM"), instance=instance)
poweroff_task = self._session._call_method(
self._session._get_vim(),
"PowerOffVM_Task", vm_ref)
self._session._wait_for_task(poweroff_task)
LOG.debug(_("Powered off the VM"), instance=instance)
# Un-register the VM
try:
LOG.debug(_("Unregistering the VM"), instance=instance)
self._session._call_method(self._session._get_vim(),
"UnregisterVM", vm_ref)
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
" while un-registering the VM: %s") % str(excep))
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks and datastore_name:
try:
dir_ds_compliant_path = ds_util.build_datastore_path(
datastore_name,
os.path.dirname(vmx_file_path))
LOG.debug(_("Deleting contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
ds_ref_ret = query['datastore']
ds_ref = ds_ref_ret.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
ds_util.file_delete(self._session,
dir_ds_compliant_path,
dc_info.ref)
LOG.debug(_("Deleted contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:destroy, "
"got this exception while deleting"
" the VM contents from the disk: %s")
% str(excep))
except Exception as exc:
LOG.exception(exc, instance=instance)
finally:
vm_util.vm_ref_cache_delete(instance_name)
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
def unpause(self, instance):
msg = _("unpause not supported for vmwareapi")
raise NotImplementedError(msg)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be suspended.
if pwr_state == "poweredOn":
LOG.debug(_("Suspending the VM"), instance=instance)
suspend_task = self._session._call_method(self._session._get_vim(),
"SuspendVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug(_("Suspended the VM"), instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
else:
LOG.debug(_("VM was already in suspended state. So returning "
"without doing anything"), instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug(_("Resuming the VM"), instance=instance)
suspend_task = self._session._call_method(
self._session._get_vim(),
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug(_("Resumed the VM"), instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
def rescue(self, context, instance, network_info, image_meta):
"""Rescue the specified instance.
- shutdown the instance VM.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
self.power_off(instance)
r_instance = copy.deepcopy(instance)
instance_name = r_instance['uuid'] + self._rescue_suffix
self.spawn(context, r_instance, image_meta,
None, None, network_info,
instance_name=instance_name,
power_on=False)
# Attach vmdk to the rescue VM
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_path, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance['uuid'])
rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
self._volumeops.attach_disk_to_vm(
rescue_vm_ref, r_instance,
adapter_type, disk_type, vmdk_path)
self._power_on(instance, vm_ref=rescue_vm_ref)
def unrescue(self, instance):
"""Unrescue the specified instance."""
# Get the original vmdk_path
vm_ref = vm_util.get_vm_ref(self._session, instance)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_path, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance['uuid'])
r_instance = copy.deepcopy(instance)
instance_name = r_instance['uuid'] + self._rescue_suffix
# detach the original instance disk from the rescue disk
vm_rescue_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_rescue_ref,
"VirtualMachine", "config.hardware.device")
device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path)
self._power_off_vm_ref(vm_rescue_ref)
self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device)
self.destroy(r_instance, None, instance_name=instance_name)
self._power_on(instance)
def _power_off_vm_ref(self, vm_ref):
"""Power off the specifed vm.
:param vm_ref: a reference object to the VM.
"""
poweroff_task = self._session._call_method(
self._session._get_vim(),
"PowerOffVM_Task", vm_ref)
self._session._wait_for_task(poweroff_task)
def power_off(self, instance):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be powered off.
if pwr_state == "poweredOn":
LOG.debug(_("Powering off the VM"), instance=instance)
self._power_off_vm_ref(vm_ref)
LOG.debug(_("Powered off the VM"), instance=instance)
# Raise Exception if VM is suspended
elif pwr_state == "suspended":
reason = _("instance is suspended and cannot be powered off.")
raise exception.InstancePowerOffFailure(reason=reason)
else:
LOG.debug(_("VM was already in powered off state. So returning "
"without doing anything"), instance=instance)
def _power_on(self, instance, vm_ref=None):
"""Power on the specified instance."""
if not vm_ref:
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state == "poweredOn":
LOG.debug(_("VM was already in powered on state. So returning "
"without doing anything"), instance=instance)
# Only PoweredOff and Suspended VMs can be powered on.
else:
LOG.debug(_("Powering on the VM"), instance=instance)
poweron_task = self._session._call_method(
self._session._get_vim(),
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(poweron_task)
LOG.debug(_("Powered on the VM"), instance=instance)
def power_on(self, context, instance, network_info, block_device_info):
self._power_on(instance)
def _get_orig_vm_name_label(self, instance):
return instance['uuid'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the clone disk step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
instance_uuid = instance['uuid']
LOG.debug(_("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d"),
{'instance_uuid': instance_uuid, 'progress': progress},
instance=instance)
self._virtapi.instance_update(context, instance_uuid,
{'progress': progress})
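# Worked example (sketch): with RESIZE_TOTAL_STEPS = 4, completing step 2
# reports round(float(2) / 4 * 100) == 50 percent progress.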
def migrate_disk_and_power_off(self, context, instance, dest,
flavor):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Read the host_ref for the destination. If this is None then the
# VC will decide on placement
host_ref = self._get_host_ref_from_name(dest)
# 1. Power off the instance
self.power_off(instance)
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Disassociate the linked vsphere VM from the instance
vm_util.disassociate_vmref_from_instance(self._session, instance,
vm_ref,
suffix=self._migrate_suffix)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
ds_ref = vm_util.get_datastore_ref_and_name(
self._session, self._cluster, host_ref,
datastore_regex=self._datastore_regex)[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
# 3. Clone the VM for instance
vm_util.clone_vmref_for_instance(self._session, instance, vm_ref,
host_ref, ds_ref, dc_info.vmFolder)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# Destroy the original VM. The vm_ref needs to be searched using
# instance['uuid'] + self._migrate_suffix as the identifier. The VM will
# not be found when searched by its instanceUuid; it is instead located
# via the uuid buried in the extraConfig.
vm_ref = vm_util.search_vm_ref_by_identifier(self._session,
instance['uuid'] + self._migrate_suffix)
if vm_ref is None:
LOG.debug(_("instance not present"), instance=instance)
return
try:
LOG.debug(_("Destroying the VM"), instance=instance)
destroy_task = self._session._call_method(
self._session._get_vim(),
"Destroy_Task", vm_ref)
self._session._wait_for_task(destroy_task)
LOG.debug(_("Destroyed the VM"), instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
"exception while destroying the VM: %s") % str(excep))
def finish_revert_migration(self, context, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
vm_util.associate_vmref_for_instance(self._session, instance,
suffix=self._migrate_suffix)
if power_on:
self._power_on(instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
if resize_instance:
client_factory = self._session._get_vim().client.factory
vm_ref = vm_util.get_vm_ref(self._session, instance)
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
instance)
reconfig_task = self._session._call_method(
self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=vm_resize_spec)
self._session._wait_for_task(reconfig_task)
# 4. Start VM
if power_on:
self._power_on(instance)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False):
"""Spawning live_migration operation for distributing high-load."""
vm_ref = vm_util.get_vm_ref(self._session, instance_ref)
host_ref = self._get_host_ref_from_name(dest)
if host_ref is None:
raise exception.HostNotFound(host=dest)
LOG.debug(_("Migrating VM to host %s") % dest, instance=instance_ref)
try:
vm_migrate_task = self._session._call_method(
self._session._get_vim(),
"MigrateVM_Task", vm_ref,
host=host_ref,
priority="defaultPriority")
self._session._wait_for_task(vm_migrate_task)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance_ref, dest, block_migration)
post_method(context, instance_ref, dest, block_migration)
LOG.debug(_("Migrated VM to host %s") % dest, instance=instance_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance):
"""Return data about the VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
"runtime.powerState"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = {'summary.config.numCpu': None,
'summary.config.memorySizeMB': None,
'runtime.powerState': None}
self._get_values_from_object_properties(vm_props, query)
max_mem = int(query['summary.config.memorySizeMB']) * 1024
return {'state': VMWARE_POWER_STATES[query['runtime.powerState']],
'max_mem': max_mem,
'mem': max_mem,
'num_cpu': int(query['summary.config.numCpu']),
'cpu_time': 0}
def _get_diagnostic_from_object_properties(self, props, wanted_props):
diagnostics = {}
while props:
for elem in props.objects:
for prop in elem.propSet:
if prop.name in wanted_props:
prop_dict = vim.object_to_dict(prop.val, list_depth=1)
diagnostics.update(prop_dict)
token = vm_util._get_token(props)
if not token:
break
props = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
return diagnostics
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config",
"summary.quickStats",
"summary.runtime"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
data = self._get_diagnostic_from_object_properties(vm_props,
set(lst_properties))
        # Add a namespace to all of the diagnostics
return dict([('vmware:' + k, v) for k, v in data.items()])
def _get_vnc_console_connection(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
opt_value = self._session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
vm_util.VNC_CONFIG_KEY)
if opt_value:
port = int(opt_value.value)
else:
raise exception.ConsoleTypeUnavailable(console_type='vnc')
return {'port': port,
'internal_access_path': None}
def get_vnc_console(self, instance):
"""Return connection info for a vnc console using ESX logic."""
vnc_console = self._get_vnc_console_connection(instance)
vnc_console['host'] = CONF.vmware.host_ip
return vnc_console
@staticmethod
def _get_machine_id_str(network_info):
machine_id_str = ''
for vif in network_info:
# TODO(vish): add support for dns2
# TODO(sateesh): add support for injection of ipv6 configuration
network = vif['network']
ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
if len(subnets_v4) > 0:
if len(subnets_v4[0]['ips']) > 0:
ip_v4 = subnets_v4[0]['ips'][0]
if len(subnets_v4[0]['dns']) > 0:
dns = subnets_v4[0]['dns'][0]['address']
netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
gateway_v4 = subnets_v4[0]['gateway']['address']
broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
interface_str = ";".join([vif['address'],
ip_v4 and ip_v4['address'] or '',
netmask_v4 or '',
gateway_v4 or '',
broadcast_v4 or '',
dns or ''])
machine_id_str = machine_id_str + interface_str + '#'
return machine_id_str
def _set_machine_id(self, client_factory, instance, network_info):
"""Set the machine id of the VM for guest tools to pick up
and reconfigure the network interfaces.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory,
self._get_machine_id_str(network_info))
LOG.debug(_("Reconfiguring VM instance to set the machine id"),
instance=instance)
reconfig_task = self._session._call_method(self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=machine_id_change_spec)
self._session._wait_for_task(reconfig_task)
LOG.debug(_("Reconfigured VM instance to set the machine id"),
instance=instance)
def _set_vnc_config(self, client_factory, instance, port):
"""Set the vnc configuration of the VM."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port)
LOG.debug(_("Reconfiguring VM instance to enable vnc on "
"port - %(port)s") % {'port': port},
instance=instance)
reconfig_task = self._session._call_method(self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=vnc_config_spec)
self._session._wait_for_task(reconfig_task)
LOG.debug(_("Reconfigured VM instance to enable vnc on "
"port - %(port)s") % {'port': port},
instance=instance)
def _get_ds_browser(self, ds_ref):
ds_browser = self._datastore_browser_mapping.get(ds_ref)
if not ds_browser:
ds_browser = self._session._call_method(
vim_util, "get_dynamic_property", ds_ref, "Datastore",
"browser")
self._datastore_browser_mapping[ds_ref] = ds_browser
return ds_browser
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
map = self._datastore_dc_mapping.get(ds_ref.value)
if not map:
dc_obj = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, dc_obj)
map = DcInfo(ref=dc_obj.objects[0].obj,
name=dc_obj.objects[0].propSet[0].val,
vmFolder=self._get_vmfolder_ref())
self._datastore_dc_mapping[ds_ref.value] = map
return map
def _get_host_ref_from_name(self, host_name):
"""Get reference to the host with the name specified."""
host_objs = self._session._call_method(vim_util, "get_objects",
"HostSystem", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, host_objs)
for host in host_objs:
if hasattr(host, 'propSet'):
if host.propSet[0].val == host_name:
return host.obj
return None
def _get_vmfolder_ref(self):
"""Get the Vm folder ref from the datacenter."""
dc_objs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["vmFolder"])
vm_util._cancel_retrieve_if_necessary(self._session, dc_objs)
# There is only one default datacenter in a standalone ESX host
vm_folder_ref = dc_objs.objects[0].propSet[0].val
return vm_folder_ref
def _create_folder_if_missing(self, ds_name, ds_ref, folder):
"""Create a folder if it does not exist.
        Currently there are two folders that are required on the datastore
- base folder - the folder to store cached images
- temp folder - the folder used for snapshot management and
image uploading
        This method is used to manage those folders and to ensure
        that they are created if they are missing.
        The ds_util method mkdir is used to check whether the folder
        exists. If it raises the exception 'FileAlreadyExistsException'
        then the folder already exists on the datastore.
"""
path = ds_util.build_datastore_path(ds_name, folder)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
LOG.debug(_("Folder %s created."), path)
except error_util.FileAlreadyExistsException:
# NOTE(hartsocks): if the folder already exists, that
# just means the folder was prepped by another process.
pass
def check_cache_folder(self, ds_name, ds_ref):
"""Check that the cache folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._base_folder)
def check_temp_folder(self, ds_name, ds_ref):
"""Check that the temp folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._tmp_folder)
def _check_if_folder_file_exists(self, ds_browser, ds_ref, ds_name,
folder_name, file_name):
# Ensure that the cache folder exists
self.check_cache_folder(ds_name, ds_ref)
# Check if the file exists or not.
folder_path = ds_util.build_datastore_path(ds_name, folder_name)
file_exists = ds_util.file_exists(self._session, ds_browser,
folder_path, file_name)
return file_exists
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
client_factory = self._session._get_vim().client.factory
self._set_machine_id(client_factory, instance, network_info)
def manage_image_cache(self, context, instances):
if not CONF.remove_unused_base_images:
LOG.debug(_("Image aging disabled. Aging will not be done."))
return
datastores = vm_util.get_available_datastores(self._session,
self._cluster,
self._datastore_regex)
datastores_info = []
for ds in datastores:
ds_info = self.get_datacenter_ref_and_name(ds['ref'])
datastores_info.append((ds, ds_info))
self._imagecache.update(context, instances, datastores_info)
def _get_valid_vms_from_retrieve_result(self, retrieve_result):
"""Returns list of valid vms from RetrieveResult object."""
lst_vm_names = []
while retrieve_result:
token = vm_util._get_token(retrieve_result)
for vm in retrieve_result.objects:
vm_name = None
conn_state = None
for prop in vm.propSet:
if prop.name == "name":
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
# Ignoring the orphaned or inaccessible VMs
if conn_state not in ["orphaned", "inaccessible"]:
lst_vm_names.append(vm_name)
if token:
retrieve_result = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return lst_vm_names
class VMwareVCVMOps(VMwareVMOps):
"""Management class for VM-related tasks.
Contains specializations to account for differences in vSphere API behavior
    when invoked on Virtual Center instead of an ESX host.
"""
def get_copy_virtual_disk_spec(self, client_factory, adapter_type,
disk_type):
LOG.debug(_("Will copy while retaining adapter type "
"%(adapter_type)s and disk type %(disk_type)s") %
{"disk_type": disk_type,
"adapter_type": adapter_type})
# Passing of the destination copy spec is not supported when
# VirtualDiskManager.CopyVirtualDisk is called on VC. The behavior of a
# spec-less copy is to consolidate to the target disk while keeping its
# disk and adapter type unchanged.
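        # Consequently no copy spec is built here and the method implicitly
        # returns None; callers then rely on the spec-less CopyVirtualDisk
        # behaviour described above.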
def _update_datacenter_cache_from_objects(self, dcs):
"""Updates the datastore/datacenter cache."""
while dcs:
token = vm_util._get_token(dcs)
for dco in dcs.objects:
name = None
vmFolder = None
dc_ref = dco.obj
ds_refs = []
for p in dco.propSet:
if p.name == 'name':
name = p.val
if p.name == 'datastore':
datastore_refs = p.val.ManagedObjectReference
for ds in datastore_refs:
ds_refs.append(ds.value)
if p.name == 'vmFolder':
vmFolder = p.val
for ds_ref in ds_refs:
self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref,
name=name, vmFolder=vmFolder)
if token:
dcs = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
if not dc_info:
dcs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name", "datastore", "vmFolder"])
self._update_datacenter_cache_from_objects(dcs)
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
return dc_info
def list_instances(self):
"""Lists the VM instances that are registered with vCenter cluster."""
properties = ['name', 'runtime.connectionState']
LOG.debug(_("Getting list of instances from cluster %s"),
self._cluster)
vms = []
root_res_pool = self._session._call_method(
vim_util, "get_dynamic_property", self._cluster,
'ClusterComputeResource', 'resourcePool')
if root_res_pool:
vms = self._session._call_method(
vim_util, 'get_inner_objects', root_res_pool, 'vm',
'VirtualMachine', properties)
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
def get_vnc_console(self, instance):
"""Return connection info for a vnc console using vCenter logic."""
# vCenter does not run virtual machines and does not run
# a VNC proxy. Instead, you need to tell OpenStack to talk
# directly to the ESX host running the VM you are attempting
# to connect to via VNC.
vnc_console = self._get_vnc_console_connection(instance)
host_name = vm_util.get_host_name_for_vm(
self._session,
instance)
vnc_console['host'] = host_name
# NOTE: VM can move hosts in some situations. Debug for admins.
LOG.debug(_("VM %(uuid)s is currently on host %(host_name)s"),
{'uuid': instance['name'], 'host_name': host_name},
instance=instance)
return vnc_console
|
GHSA-jv34-xvjq-ppch
|
lollms/server/endpoints/lollms_configuration_infos.py
|
@@ -155,7 +155,7 @@ async def apply_settings(request: Request):
"debug_log_file_path",
"petals_model_path",
"skills_lib_database_name",
- "discussion_db_name"
+ "discussion_db_name",
"user_avatar",
]
|
"""
project: lollms
file: lollms_configuration_infos.py
author: ParisNeo
description:
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
application. These routes are specific to configurations
"""
from fastapi import APIRouter, Request, HTTPException
from pydantic import BaseModel
from json import JSONDecodeError
import pkg_resources
from lollms.server.elf_server import LOLLMSElfServer
from lollms.binding import BindingBuilder, InstallOption
from ascii_colors import ASCIIColors
from lollms.utilities import load_config, trace_exception, gc, show_yes_no_dialog
from lollms.security import check_access
from pathlib import Path
from typing import List
import json
from typing import List, Any
from lollms.security import sanitize_path, forbid_remote_access
class SettingsInfos(BaseModel):
setting_name:str
setting_value:str
# ----------------------- Defining router and main class ------------------------------
router = APIRouter()
lollmsElfServer = LOLLMSElfServer.get_instance()
# ----------------------------------- Settings -----------------------------------------
@router.get("/get_config")
def get_config():
"""
Get the configuration of the Lollms server.
Returns:
Config: The configuration object as a Pydantic model.
"""
return lollmsElfServer.config.to_dict()
@router.post("/update_setting")
async def update_setting(request: Request):
"""
    Endpoint to update a single configuration setting.
:param request: The HTTP request object.
:return: A JSON response with the status of the operation.
"""
# Prevent all outsiders from sending something to this endpoint
forbid_remote_access(lollmsElfServer)
try:
config_data = (await request.json())
check_access(lollmsElfServer, config_data["client_id"])
if "config" in config_data.keys():
config_data = config_data["config"]
setting_name = config_data["setting_name"]
setting_value = sanitize_path(config_data["setting_value"])
ASCIIColors.info(f"Requested updating of setting {setting_name} to {setting_value}")
if setting_name== "binding_name":
if lollmsElfServer.config['binding_name']!= setting_value:
print(f"New binding selected : {setting_value}")
lollmsElfServer.config["binding_name"]=setting_value
try:
if lollmsElfServer.binding:
lollmsElfServer.binding.destroy_model()
lollmsElfServer.binding = None
lollmsElfServer.model = None
for per in lollmsElfServer.mounted_personalities:
if per is not None:
per.model = None
gc.collect()
lollmsElfServer.binding = BindingBuilder().build_binding(lollmsElfServer.config, lollmsElfServer.lollms_paths, InstallOption.INSTALL_IF_NECESSARY, lollmsCom=lollmsElfServer)
lollmsElfServer.config.model_name = lollmsElfServer.binding.binding_config.model_name
lollmsElfServer.model = lollmsElfServer.binding.build_model()
for per in lollmsElfServer.mounted_personalities:
if per is not None:
per.model = lollmsElfServer.model
lollmsElfServer.config.save_config()
ASCIIColors.green("Binding loaded successfully")
except Exception as ex:
ASCIIColors.error(f"Couldn't build binding: [{ex}]")
trace_exception(ex)
return {"status":False, 'error':str(ex)}
else:
if lollmsElfServer.config["debug"]:
print(f"Configuration {setting_name} set to {setting_value}")
return {'setting_name': setting_name, "status":True}
elif setting_name == "model_name":
ASCIIColors.yellow(f"Changing model to: {setting_value}")
lollmsElfServer.config["model_name"]=setting_value
lollmsElfServer.config.save_config()
try:
lollmsElfServer.model = None
for per in lollmsElfServer.mounted_personalities:
if per is not None:
per.model = None
lollmsElfServer.binding.binding_config.model_name = lollmsElfServer.config.model_name
lollmsElfServer.model = lollmsElfServer.binding.build_model()
if lollmsElfServer.model is not None:
ASCIIColors.yellow("New model OK")
for per in lollmsElfServer.mounted_personalities:
if per is not None:
per.model = lollmsElfServer.model
except Exception as ex:
trace_exception(ex)
lollmsElfServer.InfoMessage(f"It looks like you we couldn't load the model.\nHere is the error message:\n{ex}")
else:
if setting_name in lollmsElfServer.config.config.keys():
lollmsElfServer.config[setting_name] = setting_value
else:
if lollmsElfServer.config["debug"]:
print(f"Configuration {setting_name} couldn't be set to {setting_value}")
return {'setting_name': setting_name, "status":False}
if lollmsElfServer.config["debug"]:
print(f"Configuration {setting_name} set to {setting_value}")
ASCIIColors.success(f"Configuration {setting_name} updated")
if lollmsElfServer.config.auto_save:
lollmsElfServer.config.save_config()
# Tell that the setting was changed
return {'setting_name': setting_name, "status":True}
except HTTPException as ex:
raise ex
except Exception as ex:
trace_exception(ex)
lollmsElfServer.error(ex)
return {"status":False,"error":str(ex)}
@router.post("/apply_settings")
async def apply_settings(request: Request):
"""
Endpoint to apply configuration settings.
:param request: The HTTP request object.
:return: A JSON response with the status of the operation.
"""
path_traversal_prone_settings=[
"binding_name",
"model_name",
"model_variant",
"app_custom_logo",
"user_avatar",
"debug_log_file_path",
"petals_model_path",
"skills_lib_database_name",
"discussion_db_name"
"user_avatar",
]
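    # NOTE: "discussion_db_name" above has no trailing comma, so Python
    # concatenates it with the following "user_avatar" literal into the single
    # entry "discussion_db_nameuser_avatar"; as a result "discussion_db_name"
    # is never treated as path-traversal prone (the patch above adds the
    # missing comma).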
# Prevent all outsiders from sending something to this endpoint
forbid_remote_access(lollmsElfServer)
if lollmsElfServer.config.turn_on_setting_update_validation:
if not show_yes_no_dialog("WARNING!!!","I received a settings modification request.\nIf this was not initiated by you, please select No. Are you the one who requested these settings changes?"):
return {"status":False,"error": "A settings modification attempt not validated by user"}
try:
config_data = await request.json()
config = config_data["config"]
check_access(lollmsElfServer, config_data["client_id"])
try:
for key in lollmsElfServer.config.config.keys():
if key=="host" and lollmsElfServer.config.config[key] in ["127.0.0.1","localhost"] and config.get(key, lollmsElfServer.config.config[key]) not in ["127.0.0.1","localhost"]:
if not show_yes_no_dialog("WARNING!!!","You are changing the host value to something other than localhost, which can be dangerous if you do not trust the network you are on.\nIt is strongly advised not to do this as it may expose your computer to remote access, posing potential security risks.\nDo you want to ignore this message and proceed with changing the host value?"):
config["host"]=lollmsElfServer.config.config[key]
if key=="turn_on_code_validation" and lollmsElfServer.config.config[key]==True and config.get(key, lollmsElfServer.config.config[key])==False:
if not show_yes_no_dialog("WARNING!!!","I received a request to deactivate code execution validation.\nAre you sure?\nThis is a very bad idea, especially if you activate remote access.\nProceeding without proper validation can pose a serious security risk to your system and data.\nOnly proceed if you are absolutely certain of the security measures in place.\nDo you want to continue despite the warning?"):
config["turn_on_code_validation"]=False
if key=="turn_on_setting_update_validation" and lollmsElfServer.config.config[key]==True and config.get(key, lollmsElfServer.config.config[key])==False:
if not show_yes_no_dialog("WARNING!!!","I received a request to deactivate settings update validation.\nAre you sure?\nThis is a very risky decision, especially if you have enabled remote access.\nDisabling this validation can allow attackers to manipulate server settings and gain unauthorized access.\nProceed only if you are completely confident in the security of your system.\nDo you want to continue despite the warning?"):
config["turn_on_setting_update_validation"]=False
if key in path_traversal_prone_settings:
config[key]=sanitize_path(config.get(key, lollmsElfServer.config.config[key]))
lollmsElfServer.config.config[key] = config.get(key, lollmsElfServer.config.config[key])
ASCIIColors.success("OK")
lollmsElfServer.rebuild_personalities()
lollmsElfServer.verify_servers()
if lollmsElfServer.config.auto_save:
lollmsElfServer.config.save_config()
return {"status":True}
except Exception as ex:
trace_exception(ex)
return {"status":False,"error":str(ex)}
except Exception as ex:
trace_exception(ex)
lollmsElfServer.error(ex)
return {"status":False,"error":str(ex)}
@router.post("/save_settings")
def save_settings():
lollmsElfServer.config.save_config()
if lollmsElfServer.config["debug"]:
print("Configuration saved")
# Tell that the setting was changed
lollmsElfServer.sio.emit('save_settings', {"status":True})
return {"status":True}
|
GHSA-8mrm-r7h3-c3hj
|
lib/ansible/plugins/callback/splunk.py
|
@@ -98,6 +98,9 @@ def send_event(self, url, authtoken, state, result, runtime):
else:
ansible_role = None
+ if 'args' in result._task_fields:
+ del result._task_fields['args']
+
data = {}
data['uuid'] = result._task._uuid
data['session'] = self.session
|
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: splunk
type: aggregate
short_description: Sends task result events to Splunk HTTP Event Collector
author: "Stuart Hirst <[email protected]>"
description:
- This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
- The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
- Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
version_added: "2.7"
requirements:
- Whitelisting this callback plugin
- 'Create a HTTP Event Collector in Splunk'
- 'Define the url and token in ansible.cfg'
options:
url:
description: URL to the Splunk HTTP collector source
env:
- name: SPLUNK_URL
ini:
- section: callback_splunk
key: url
authtoken:
description: Token to authenticate the connection to the Splunk HTTP collector
env:
- name: SPLUNK_AUTHTOKEN
ini:
- section: callback_splunk
key: authtoken
'''
EXAMPLES = '''
examples: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = splunk
Set the environment variable
export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event
export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88
Set the ansible.cfg variable in the callback_splunk block
[callback_splunk]
url = http://mysplunkinstance.datapaas.io:8088/services/collector/event
authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88
'''
import json
import uuid
import socket
import getpass
from datetime import datetime
from os.path import basename
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class SplunkHTTPCollectorSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.ip_address = socket.gethostbyname(socket.gethostname())
self.user = getpass.getuser()
def send_event(self, url, authtoken, state, result, runtime):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
if result._task_fields['args'].get('_ansible_version'):
self.ansible_version = \
result._task_fields['args'].get('_ansible_version')
if result._task._role:
ansible_role = str(result._task._role)
else:
ansible_role = None
data = {}
data['uuid'] = result._task._uuid
data['session'] = self.session
data['status'] = state
data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
'+0000')
data['host'] = self.host
data['ip_address'] = self.ip_address
data['user'] = self.user
data['runtime'] = runtime
data['ansible_version'] = self.ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
data['ansible_role'] = ansible_role
data['ansible_task'] = result._task_fields
data['ansible_result'] = result._result
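        # NOTE: result._task_fields still includes the raw task 'args', so any
        # sensitive values passed to the task (e.g. passwords) are forwarded to
        # Splunk as part of 'ansible_task'; the patch above deletes 'args'
        # before this payload is built.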
        # This wraps the json payload in an outer json event needed by Splunk
jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True)
jsondata = '{"event":' + jsondata + "}"
open_url(
url,
jsondata,
headers={
'Content-type': 'application/json',
'Authorization': 'Splunk ' + authtoken
},
method='POST'
)
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'splunk'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
self.start_datetimes = {} # Collect task start times
self.url = None
self.authtoken = None
self.splunk = SplunkHTTPCollectorSource()
def _runtime(self, result):
return (
datetime.utcnow() -
self.start_datetimes[result._task._uuid]
).total_seconds()
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.url = self.get_option('url')
if self.url is None:
self.disabled = True
self._display.warning('Splunk HTTP collector source URL was '
'not provided. The Splunk HTTP collector '
'source URL can be provided using the '
'`SPLUNK_URL` environment variable or '
'in the ansible.cfg file.')
self.authtoken = self.get_option('authtoken')
if self.authtoken is None:
self.disabled = True
            self._display.warning('Splunk HTTP collector requires an authentication '
'token. The Splunk HTTP collector '
'authentication token can be provided using the '
'`SPLUNK_AUTHTOKEN` environment variable or '
'in the ansible.cfg file.')
def v2_playbook_on_start(self, playbook):
self.splunk.ansible_playbook = basename(playbook._file_name)
def v2_playbook_on_task_start(self, task, is_conditional):
self.start_datetimes[task._uuid] = datetime.utcnow()
def v2_playbook_on_handler_task_start(self, task):
self.start_datetimes[task._uuid] = datetime.utcnow()
def v2_runner_on_ok(self, result, **kwargs):
self.splunk.send_event(
self.url,
self.authtoken,
'OK',
result,
self._runtime(result)
)
def v2_runner_on_skipped(self, result, **kwargs):
self.splunk.send_event(
self.url,
self.authtoken,
'SKIPPED',
result,
self._runtime(result)
)
def v2_runner_on_failed(self, result, **kwargs):
self.splunk.send_event(
self.url,
self.authtoken,
'FAILED',
result,
self._runtime(result)
)
def runner_on_async_failed(self, result, **kwargs):
self.splunk.send_event(
self.url,
self.authtoken,
'FAILED',
result,
self._runtime(result)
)
def v2_runner_on_unreachable(self, result, **kwargs):
self.splunk.send_event(
self.url,
self.authtoken,
'UNREACHABLE',
result,
self._runtime(result)
)
|
GHSA-3m93-m4q6-mc6v
|
lib/ansible/plugins/callback/sumologic.py
|
@@ -89,6 +89,9 @@ def send_event(self, url, state, result, runtime):
else:
ansible_role = None
+ if 'args' in result._task_fields:
+ del result._task_fields['args']
+
data = {}
data['uuid'] = result._task._uuid
data['session'] = self.session
|
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: sumologic
type: aggregate
short_description: Sends task result events to Sumologic
author: "Ryan Currah (@ryancurrah)"
description:
- This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source
version_added: "2.6"
requirements:
- Whitelisting this callback plugin
- 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator
of C("timestamp": "(.*)")'
options:
url:
description: URL to the Sumologic HTTP collector source
env:
- name: SUMOLOGIC_URL
ini:
- section: callback_sumologic
key: url
'''
EXAMPLES = '''
examples: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = sumologic
Set the environment variable
export SUMOLOGIC_URL=https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
Set the ansible.cfg variable in the callback_sumologic block
[callback_sumologic]
url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
'''
import json
import uuid
import socket
import getpass
from datetime import datetime
from os.path import basename
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class SumologicHTTPCollectorSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.ip_address = socket.gethostbyname(socket.gethostname())
self.user = getpass.getuser()
def send_event(self, url, state, result, runtime):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
if result._task_fields['args'].get('_ansible_version'):
self.ansible_version = \
result._task_fields['args'].get('_ansible_version')
if result._task._role:
ansible_role = str(result._task._role)
else:
ansible_role = None
data = {}
data['uuid'] = result._task._uuid
data['session'] = self.session
data['status'] = state
data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
'+0000')
data['host'] = self.host
data['ip_address'] = self.ip_address
data['user'] = self.user
data['runtime'] = runtime
data['ansible_version'] = self.ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
data['ansible_role'] = ansible_role
data['ansible_task'] = result._task_fields
data['ansible_result'] = result._result
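        # NOTE: result._task_fields still carries the raw task 'args', so
        # sensitive task parameters are forwarded to Sumologic with the event;
        # the patch above deletes 'args' before this payload is built.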
open_url(
url,
data=json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True),
headers={
'Content-type': 'application/json',
'X-Sumo-Host': data['ansible_host']
},
method='POST'
)
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'sumologic'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
self.start_datetimes = {} # Collect task start times
self.url = None
self.sumologic = SumologicHTTPCollectorSource()
def _runtime(self, result):
return (
datetime.utcnow() -
self.start_datetimes[result._task._uuid]
).total_seconds()
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.url = self.get_option('url')
if self.url is None:
self.disabled = True
self._display.warning('Sumologic HTTP collector source URL was '
'not provided. The Sumologic HTTP collector '
'source URL can be provided using the '
'`SUMOLOGIC_URL` environment variable or '
'in the ansible.cfg file.')
def v2_playbook_on_start(self, playbook):
self.sumologic.ansible_playbook = basename(playbook._file_name)
def v2_playbook_on_task_start(self, task, is_conditional):
self.start_datetimes[task._uuid] = datetime.utcnow()
def v2_playbook_on_handler_task_start(self, task):
self.start_datetimes[task._uuid] = datetime.utcnow()
def v2_runner_on_ok(self, result, **kwargs):
self.sumologic.send_event(
self.url,
'OK',
result,
self._runtime(result)
)
def v2_runner_on_skipped(self, result, **kwargs):
self.sumologic.send_event(
self.url,
'SKIPPED',
result,
self._runtime(result)
)
def v2_runner_on_failed(self, result, **kwargs):
self.sumologic.send_event(
self.url,
'FAILED',
result,
self._runtime(result)
)
def runner_on_async_failed(self, result, **kwargs):
self.sumologic.send_event(
self.url,
'FAILED',
result,
self._runtime(result)
)
def v2_runner_on_unreachable(self, result, **kwargs):
self.sumologic.send_event(
self.url,
'UNREACHABLE',
result,
self._runtime(result)
)
|
GHSA-3m93-m4q6-mc6v
|
llama-index-core/llama_index/core/exec_utils.py
|
@@ -45,20 +45,16 @@ def _restricted_import(
"float": float,
"format": format,
"frozenset": frozenset,
- "getattr": getattr,
- "hasattr": hasattr,
"hash": hash,
"hex": hex,
"int": int,
"isinstance": isinstance,
"issubclass": issubclass,
- "iter": iter,
"len": len,
"list": list,
"map": map,
"max": max,
"min": min,
- "next": next,
"oct": oct,
"ord": ord,
"pow": pow,
@@ -68,7 +64,6 @@ def _restricted_import(
"reversed": reversed,
"round": round,
"set": set,
- "setattr": setattr,
"slice": slice,
"sorted": sorted,
"str": str,
@@ -94,23 +89,31 @@ def _get_restricted_globals(__globals: Union[dict, None]) -> Any:
class DunderVisitor(ast.NodeVisitor):
def __init__(self) -> None:
self.has_access_to_private_entity = False
+ self.has_access_to_disallowed_builtin = False
def visit_Name(self, node: ast.Name) -> None:
if node.id.startswith("_"):
self.has_access_to_private_entity = True
+ if node.id not in ALLOWED_BUILTINS:
+ self.has_access_to_disallowed_builtin = True
self.generic_visit(node)
def visit_Attribute(self, node: ast.Attribute) -> None:
if node.attr.startswith("_"):
self.has_access_to_private_entity = True
+ if node.attr not in ALLOWED_BUILTINS:
+ self.has_access_to_disallowed_builtin = True
self.generic_visit(node)
def _contains_protected_access(code: str) -> bool:
tree = ast.parse(code)
dunder_visitor = DunderVisitor()
dunder_visitor.visit(tree)
- return dunder_visitor.has_access_to_private_entity
+ return (
+ dunder_visitor.has_access_to_private_entity
+ or dunder_visitor.has_access_to_disallowed_builtin
+ )
def _verify_source_safety(__source: Union[str, bytes, CodeType]) -> None:
@@ -124,7 +127,8 @@ def _verify_source_safety(__source: Union[str, bytes, CodeType]) -> None:
__source = __source.decode()
if _contains_protected_access(__source):
raise RuntimeError(
- "Execution of code containing references to private or dunder methods is forbidden!"
+ "Execution of code containing references to private or dunder methods, "
+ "or disallowed builtins, is forbidden!"
)
|
import ast
import copy
from types import CodeType, ModuleType
from typing import Any, Dict, Mapping, Sequence, Union
ALLOWED_IMPORTS = {
"math",
"time",
"datetime",
"pandas",
"scipy",
"numpy",
"matplotlib",
"plotly",
"seaborn",
}
def _restricted_import(
name: str,
globals: Union[Mapping[str, object], None] = None,
locals: Union[Mapping[str, object], None] = None,
fromlist: Sequence[str] = (),
level: int = 0,
) -> ModuleType:
if name in ALLOWED_IMPORTS:
return __import__(name, globals, locals, fromlist, level)
raise ImportError(f"Import of module '{name}' is not allowed")
ALLOWED_BUILTINS = {
"abs": abs,
"all": all,
"any": any,
"ascii": ascii,
"bin": bin,
"bool": bool,
"bytearray": bytearray,
"bytes": bytes,
"chr": chr,
"complex": complex,
"divmod": divmod,
"enumerate": enumerate,
"filter": filter,
"float": float,
"format": format,
"frozenset": frozenset,
"getattr": getattr,
"hasattr": hasattr,
"hash": hash,
"hex": hex,
"int": int,
"isinstance": isinstance,
"issubclass": issubclass,
"iter": iter,
"len": len,
"list": list,
"map": map,
"max": max,
"min": min,
"next": next,
"oct": oct,
"ord": ord,
"pow": pow,
"print": print,
"range": range,
"repr": repr,
"reversed": reversed,
"round": round,
"set": set,
"setattr": setattr,
"slice": slice,
"sorted": sorted,
"str": str,
"sum": sum,
"tuple": tuple,
"type": type,
"zip": zip,
# Constants
"True": True,
"False": False,
"None": None,
"__import__": _restricted_import,
}
def _get_restricted_globals(__globals: Union[dict, None]) -> Any:
restricted_globals = copy.deepcopy(ALLOWED_BUILTINS)
if __globals:
restricted_globals.update(__globals)
return restricted_globals
class DunderVisitor(ast.NodeVisitor):
def __init__(self) -> None:
self.has_access_to_private_entity = False
def visit_Name(self, node: ast.Name) -> None:
if node.id.startswith("_"):
self.has_access_to_private_entity = True
self.generic_visit(node)
def visit_Attribute(self, node: ast.Attribute) -> None:
if node.attr.startswith("_"):
self.has_access_to_private_entity = True
self.generic_visit(node)
def _contains_protected_access(code: str) -> bool:
tree = ast.parse(code)
dunder_visitor = DunderVisitor()
dunder_visitor.visit(tree)
return dunder_visitor.has_access_to_private_entity
def _verify_source_safety(__source: Union[str, bytes, CodeType]) -> None:
"""
Verify that the source is safe to execute. For now, this means that it
does not contain any references to private or dunder methods.
"""
if isinstance(__source, CodeType):
raise RuntimeError("Direct execution of CodeType is forbidden!")
if isinstance(__source, bytes):
__source = __source.decode()
if _contains_protected_access(__source):
raise RuntimeError(
"Execution of code containing references to private or dunder methods is forbidden!"
)
def safe_eval(
__source: Union[str, bytes, CodeType],
__globals: Union[Dict[str, Any], None] = None,
__locals: Union[Mapping[str, object], None] = None,
) -> Any:
"""
eval within safe global context.
"""
_verify_source_safety(__source)
return eval(__source, _get_restricted_globals(__globals), __locals)
def safe_exec(
__source: Union[str, bytes, CodeType],
__globals: Union[Dict[str, Any], None] = None,
__locals: Union[Mapping[str, object], None] = None,
) -> None:
"""
    exec within safe global context.
"""
_verify_source_safety(__source)
return exec(__source, _get_restricted_globals(__globals), __locals)
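# Illustrative usage (not part of the original module; a minimal sketch of how
# the helpers above behave):
#   safe_eval("max([1, 2, 3])")    -> 3, evaluated against ALLOWED_BUILTINS
#   safe_eval("obj._secret")       -> RuntimeError, leading-underscore attribute
#   safe_eval("__import__('os')")  -> RuntimeError, dunder name in the source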
|
GHSA-wvpx-g427-q9wc
|
keystone/common/config.py
|
@@ -188,7 +188,7 @@ def configure():
register_cli_str('pydev-debug-host', default=None)
register_cli_int('pydev-debug-port', default=None)
- register_str('admin_token', default='ADMIN')
+ register_str('admin_token', secret=True, default='ADMIN')
register_str('bind_host', default='0.0.0.0')
register_int('compute_port', default=8774)
register_int('admin_port', default=35357)
@@ -286,7 +286,7 @@ def configure():
# ldap
register_str('url', group='ldap', default='ldap://localhost')
register_str('user', group='ldap', default=None)
- register_str('password', group='ldap', default=None)
+ register_str('password', group='ldap', secret=True, default=None)
register_str('suffix', group='ldap', default='cn=example,cn=com')
register_bool('use_dumb_member', group='ldap', default=False)
register_str('dumb_member', group='ldap', default='cn=dumb,dc=nonexistent')
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import os
import sys
from oslo.config import cfg
from keystone.common import logging
gettext.install('keystone', unicode=1)
_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_DEFAULT_AUTH_METHODS = ['password', 'token']
COMMON_CLI_OPTS = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
LOGGING_CLI_OPTS = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=_DEFAULT_LOG_FORMAT,
metavar='FORMAT',
help='A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records.'),
cfg.StrOpt('log-file',
metavar='PATH',
help='Name of log file to output. '
'If not set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
help='The directory in which to store log files. '
'(will be prepended to --log-file)'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines.')
]
CONF = cfg.CONF
def setup_logging(conf):
"""
    Sets up the logging options for the supplied configuration
    :param conf: a cfg.ConfigOpts object
"""
if conf.log_config:
# Use a logging configuration file for all settings...
if os.path.exists(conf.log_config):
logging.config.fileConfig(conf.log_config)
return
else:
raise RuntimeError(_('Unable to locate specified logging '
'config file: %s') % conf.log_config)
root_logger = logging.root
if conf.debug:
root_logger.setLevel(logging.DEBUG)
elif conf.verbose:
root_logger.setLevel(logging.INFO)
else:
root_logger.setLevel(logging.WARNING)
formatter = logging.Formatter(conf.log_format, conf.log_date_format)
if conf.use_syslog:
try:
facility = getattr(logging.SysLogHandler,
conf.syslog_log_facility)
except AttributeError:
raise ValueError(_('Invalid syslog facility'))
handler = logging.SysLogHandler(address='/dev/log',
facility=facility)
elif conf.log_file:
logfile = conf.log_file
if conf.log_dir:
logfile = os.path.join(conf.log_dir, logfile)
handler = logging.WatchedFileHandler(logfile)
else:
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def setup_authentication():
# register any non-default auth methods here (used by extensions, etc)
for method_name in CONF.auth.methods:
if method_name not in _DEFAULT_AUTH_METHODS:
register_str(method_name, group="auth")
def register_str(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_opt(cfg.StrOpt(*args, **kw), group=group)
def register_cli_str(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_cli_opt(cfg.StrOpt(*args, **kw), group=group)
def register_list(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_opt(cfg.ListOpt(*args, **kw), group=group)
def register_cli_list(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_cli_opt(cfg.ListOpt(*args, **kw), group=group)
def register_bool(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_opt(cfg.BoolOpt(*args, **kw), group=group)
def register_cli_bool(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_cli_opt(cfg.BoolOpt(*args, **kw), group=group)
def register_int(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_opt(cfg.IntOpt(*args, **kw), group=group)
def register_cli_int(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_cli_opt(cfg.IntOpt(*args, **kw), group=group)
def configure():
CONF.register_cli_opts(COMMON_CLI_OPTS)
CONF.register_cli_opts(LOGGING_CLI_OPTS)
register_cli_bool('standard-threads', default=False)
register_cli_str('pydev-debug-host', default=None)
register_cli_int('pydev-debug-port', default=None)
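    # NOTE: admin_token below (and the ldap 'password' option further down) is
    # registered without secret=True, so its value can be written to logs in
    # plaintext; the patch above marks both options as secret.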
register_str('admin_token', default='ADMIN')
register_str('bind_host', default='0.0.0.0')
register_int('compute_port', default=8774)
register_int('admin_port', default=35357)
register_int('public_port', default=5000)
register_str(
'public_endpoint', default='http://localhost:%(public_port)s/')
register_str('admin_endpoint', default='http://localhost:%(admin_port)s/')
register_str('onready')
register_str('auth_admin_prefix', default='')
register_str('policy_file', default='policy.json')
register_str('policy_default_rule', default=None)
# default max request size is 112k
register_int('max_request_body_size', default=114688)
register_int('max_param_size', default=64)
# we allow tokens to be a bit larger to accommodate PKI
register_int('max_token_size', default=8192)
register_str(
'member_role_id', default='9fe2ff9ee4384b1894a90878d3e92bab')
register_str('member_role_name', default='_member_')
# identity
register_str('default_domain_id', group='identity', default='default')
# trust
register_bool('enabled', group='trust', default=True)
# ssl
register_bool('enable', group='ssl', default=False)
register_str('certfile', group='ssl',
default="/etc/keystone/ssl/certs/keystone.pem")
register_str('keyfile', group='ssl',
default="/etc/keystone/ssl/private/keystonekey.pem")
register_str('ca_certs', group='ssl',
default="/etc/keystone/ssl/certs/ca.pem")
register_str('ca_key', group='ssl',
default="/etc/keystone/ssl/certs/cakey.pem")
register_bool('cert_required', group='ssl', default=False)
register_int('key_size', group='ssl', default=1024)
register_int('valid_days', group='ssl', default=3650)
register_str('ca_password', group='ssl', default=None)
register_str('cert_subject', group='ssl',
default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost')
# signing
register_str(
'token_format', group='signing', default="PKI")
register_str(
'certfile',
group='signing',
default="/etc/keystone/ssl/certs/signing_cert.pem")
register_str(
'keyfile',
group='signing',
default="/etc/keystone/ssl/private/signing_key.pem")
register_str(
'ca_certs',
group='signing',
default="/etc/keystone/ssl/certs/ca.pem")
register_str('ca_key', group='signing',
default="/etc/keystone/ssl/certs/cakey.pem")
register_int('key_size', group='signing', default=1024)
register_int('valid_days', group='signing', default=3650)
register_str('ca_password', group='signing', default=None)
register_str('cert_subject', group='signing',
default='/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com')
# sql
register_str('connection', group='sql', secret=True,
default='sqlite:///keystone.db')
register_int('idle_timeout', group='sql', default=200)
register_str(
'driver',
group='catalog',
default='keystone.catalog.backends.sql.Catalog')
register_str(
'driver',
group='identity',
default='keystone.identity.backends.sql.Identity')
register_str(
'driver',
group='policy',
default='keystone.policy.backends.sql.Policy')
register_str(
'driver', group='token', default='keystone.token.backends.kvs.Token')
register_str(
'driver', group='trust', default='keystone.trust.backends.sql.Trust')
register_str(
'driver', group='ec2', default='keystone.contrib.ec2.backends.kvs.Ec2')
register_str(
'driver',
group='stats',
default='keystone.contrib.stats.backends.kvs.Stats')
# ldap
register_str('url', group='ldap', default='ldap://localhost')
register_str('user', group='ldap', default=None)
register_str('password', group='ldap', default=None)
register_str('suffix', group='ldap', default='cn=example,cn=com')
register_bool('use_dumb_member', group='ldap', default=False)
register_str('dumb_member', group='ldap', default='cn=dumb,dc=nonexistent')
register_bool('allow_subtree_delete', group='ldap', default=False)
register_str('query_scope', group='ldap', default='one')
register_int('page_size', group='ldap', default=0)
register_str('alias_dereferencing', group='ldap', default='default')
register_str('user_tree_dn', group='ldap', default=None)
register_str('user_filter', group='ldap', default=None)
register_str('user_objectclass', group='ldap', default='inetOrgPerson')
register_str('user_id_attribute', group='ldap', default='cn')
register_str('user_name_attribute', group='ldap', default='sn')
register_str('user_mail_attribute', group='ldap', default='email')
register_str('user_pass_attribute', group='ldap', default='userPassword')
register_str('user_enabled_attribute', group='ldap', default='enabled')
register_str(
'user_domain_id_attribute', group='ldap', default='businessCategory')
register_int('user_enabled_mask', group='ldap', default=0)
register_str('user_enabled_default', group='ldap', default='True')
register_list(
'user_attribute_ignore', group='ldap', default='tenant_id,tenants')
register_bool('user_allow_create', group='ldap', default=True)
register_bool('user_allow_update', group='ldap', default=True)
register_bool('user_allow_delete', group='ldap', default=True)
register_bool('user_enabled_emulation', group='ldap', default=False)
register_str('user_enabled_emulation_dn', group='ldap', default=None)
register_str('tenant_tree_dn', group='ldap', default=None)
register_str('tenant_filter', group='ldap', default=None)
register_str('tenant_objectclass', group='ldap', default='groupOfNames')
register_str('tenant_id_attribute', group='ldap', default='cn')
register_str('tenant_member_attribute', group='ldap', default='member')
register_str('tenant_name_attribute', group='ldap', default='ou')
register_str('tenant_desc_attribute', group='ldap', default='description')
register_str('tenant_enabled_attribute', group='ldap', default='enabled')
register_str(
'tenant_domain_id_attribute', group='ldap', default='businessCategory')
register_list('tenant_attribute_ignore', group='ldap', default='')
register_bool('tenant_allow_create', group='ldap', default=True)
register_bool('tenant_allow_update', group='ldap', default=True)
register_bool('tenant_allow_delete', group='ldap', default=True)
register_bool('tenant_enabled_emulation', group='ldap', default=False)
register_str('tenant_enabled_emulation_dn', group='ldap', default=None)
register_str('role_tree_dn', group='ldap', default=None)
register_str('role_filter', group='ldap', default=None)
register_str(
'role_objectclass', group='ldap', default='organizationalRole')
register_str('role_id_attribute', group='ldap', default='cn')
register_str('role_name_attribute', group='ldap', default='ou')
register_str('role_member_attribute', group='ldap', default='roleOccupant')
register_list('role_attribute_ignore', group='ldap', default='')
register_bool('role_allow_create', group='ldap', default=True)
register_bool('role_allow_update', group='ldap', default=True)
register_bool('role_allow_delete', group='ldap', default=True)
register_str('group_tree_dn', group='ldap', default=None)
register_str('group_filter', group='ldap', default=None)
register_str('group_objectclass', group='ldap', default='groupOfNames')
register_str('group_id_attribute', group='ldap', default='cn')
register_str('group_name_attribute', group='ldap', default='ou')
register_str('group_member_attribute', group='ldap', default='member')
register_str('group_desc_attribute', group='ldap', default='description')
register_str(
'group_domain_id_attribute', group='ldap', default='businessCategory')
register_list('group_attribute_ignore', group='ldap', default='')
register_bool('group_allow_create', group='ldap', default=True)
register_bool('group_allow_update', group='ldap', default=True)
register_bool('group_allow_delete', group='ldap', default=True)
register_str('domain_tree_dn', group='ldap', default=None)
register_str('domain_filter', group='ldap', default=None)
register_str('domain_objectclass', group='ldap', default='groupOfNames')
register_str('domain_id_attribute', group='ldap', default='cn')
register_str('domain_name_attribute', group='ldap', default='ou')
register_str('domain_member_attribute', group='ldap', default='member')
register_str('domain_desc_attribute', group='ldap', default='description')
register_str('domain_enabled_attribute', group='ldap', default='enabled')
register_list('domain_attribute_ignore', group='ldap', default='')
register_bool('domain_allow_create', group='ldap', default=True)
register_bool('domain_allow_update', group='ldap', default=True)
register_bool('domain_allow_delete', group='ldap', default=True)
register_bool('domain_enabled_emulation', group='ldap', default=False)
register_str('domain_enabled_emulation_dn', group='ldap', default=None)
register_str('tls_cacertfile', group='ldap', default=None)
register_str('tls_cacertdir', group='ldap', default=None)
register_bool('use_tls', group='ldap', default=False)
register_str('tls_req_cert', group='ldap', default='demand')
# pam
register_str('url', group='pam', default=None)
register_str('userid', group='pam', default=None)
register_str('password', group='pam', default=None)
# default authentication methods
register_list('methods', group='auth', default=_DEFAULT_AUTH_METHODS)
register_str(
'password', group='auth', default='keystone.auth.plugins.token.Token')
register_str(
'token', group='auth',
default='keystone.auth.plugins.password.Password')
# register any non-default auth methods here (used by extensions, etc)
for method_name in CONF.auth.methods:
if method_name not in _DEFAULT_AUTH_METHODS:
register_str(method_name, group='auth')
|
GHSA-rxrm-xvp4-jqvh
|
tensorflow/python/ops/image_ops_test.py
|
@@ -3161,6 +3161,14 @@ def testPreserveAspectRatioSquare(self):
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
+ def testLargeDim(self):
+ with self.session():
+ with self.assertRaises(errors.InternalError):
+ x = np.ones((5, 1, 1, 2))
+ v = image_ops.resize_images_v2(x, [1610637938, 1610637938],
+ image_ops.ResizeMethod.BILINEAR)
+ _ = self.evaluate(v)
+
class ResizeImagesTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import functools
import itertools
import math
import os
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session():
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
class RGBToYIQTest(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yiq and yiq_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yuv and yuv_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
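          # ITU-R BT.601 luma weights (the same ones rgb_to_grayscale uses)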
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testGrayscaleToRGBInputValidation(self):
# tests whether the grayscale_to_rgb function raises
# an exception if the input images' last dimension is
# not of size 1, i.e. the images have shape
# [batch size, height, width] or [height, width]
# tests if an exception is raised if a three dimensional
# input is used, i.e. the images have shape [batch size, height, width]
with self.cached_session():
# 3-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "Last dimension of a grayscale image should be size 1"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
    # tests if an exception is raised if a one-dimensional
    # input is used, i.e. the image has shape [width]
with self.cached_session():
# 1-D input without batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "must be at least two-dimensional"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.cached_session():
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.cached_session():
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.cached_session():
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.cached_session():
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_less_zero_float32(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_gamma(x, gamma=-1)
def test_adjust_gamma_less_zero_uint8(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_gamma(x, gamma=-1)
def test_adjust_gamma_less_zero_tensor(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = constant_op.constant(-1.0, dtype=dtypes.float32)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image = image_ops.adjust_gamma(x, gamma=y)
self.evaluate(image)
def _test_adjust_gamma_uint8(self, gamma):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = np.trunc(self.evaluate(y))
# calculate gamma correction using numpy
# firstly, transform uint8 to float representation
# then perform correction
y_np = np.power(x_np / 255.0, gamma)
# convert correct numpy image back to uint8 type
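      # scaling by 255.5 (rather than 255.0) before trunc approximates rounding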
y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))
self.assertAllClose(y_tf, y_np, 1e-6)
def _test_adjust_gamma_float32(self, gamma):
"""Verifying the output with expected results for gamma
correction for float32 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 1.0, (8, 8))
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = self.evaluate(y)
y_np = np.clip(np.power(x_np, gamma), 0, 1.0)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_one_float32(self):
"""Same image should be returned for gamma equal to one"""
self._test_adjust_gamma_float32(1.0)
def test_adjust_gamma_one_uint8(self):
self._test_adjust_gamma_uint8(1.0)
def test_adjust_gamma_zero_uint8(self):
"""White image should be returned for gamma equal
to zero for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.0)
def test_adjust_gamma_less_one_uint8(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.5)
def test_adjust_gamma_greater_one_uint8(self):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=1.0)
def test_adjust_gamma_less_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for float32 images
"""
self._test_adjust_gamma_float32(0.5)
def test_adjust_gamma_greater_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two for float32 images
"""
    self._test_adjust_gamma_float32(2.0)
def test_adjust_gamma_zero_float32(self):
"""White image should be returned for gamma equal
to zero for float32 images
"""
self._test_adjust_gamma_float32(0.0)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
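      # the +10.0 keeps the value non-negative so fmod wraps the hue into [0, 1)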
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.cached_session():
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = self.evaluate(y)
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
fused = False
with self.assertRaisesRegex(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class FlipImageBenchmark(test.Benchmark):
def _benchmarkFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
image_shape = [16, 299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
"%.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkFlipLeftRightCpu1(self):
self._benchmarkFlipLeftRight("/cpu:0", 1)
def benchmarkFlipLeftRightCpuAll(self):
self._benchmarkFlipLeftRight("/cpu:0", None)
def benchmarkFlipLeftRightGpu(self):
self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
def benchmarkRandomFlipLeftRightCpu1(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
def benchmarkRandomFlipLeftRightCpuAll(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", None)
def benchmarkRandomFlipLeftRightGpu(self):
self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
def benchmarkBatchedRandomFlipLeftRightCpu1(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)
def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)
def benchmarkBatchedRandomFlipLeftRightGpu(self):
self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
self.evaluate(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchSaturation(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.cached_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self.evaluate(image_ops.adjust_saturation(x_np, scale))
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRightStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
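        # (flips ~ Binomial(100, 0.5): std dev = sqrt(100 * 0.5 * 0.5) = 5)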
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down),
)
def testRandomFlipStateless(self, func):
with test_util.use_gpu():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
if y_tf_eval[0][0] == 1:
self.assertAllEqual(y_tf_eval, x_np)
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval, y_np)
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
# Verify that results are deterministic.
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down)
)
def testRandomFlipStatelessWithBatch(self, func):
with test_util.use_gpu():
batch_size = 16
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([1, 2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
for j in range(batch_size):
if y_tf_eval[j][0][0] == 1:
self.assertAllEqual(y_tf_eval[j], x_np[j])
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval[j], y_np[j])
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDownStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
      # Ops that support 3-D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertIsNone(transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegex(ValueError, "must be > 0"):
op(p_zero_dim)
      # Ops that support 4-D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegex(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
for k in xrange(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testFlipImageUnknownShape(self):
expected_output = constant_op.constant([[[[3, 4, 5], [0, 1, 2]],
[[9, 10, 11], [6, 7, 8]]]])
def generator():
image_input = np.array(
[[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], np.int32)
yield image_input
dataset = dataset_ops.Dataset.from_generator(
generator,
output_types=dtypes.int32,
output_shapes=tensor_shape.TensorShape([1, 2, 2, 3]))
dataset = dataset.map(image_ops.flip_left_right)
image_flipped_via_dataset_map = get_single_element.get_single_element(
dataset.take(1))
self.assertAllEqual(image_flipped_via_dataset_map, expected_output)
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
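    # blend each pixel with its image's per-channel mean: mean + factor * (x - mean)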
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session():
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"contrast_factor must be scalar|"
"Shape must be rank 0 but is rank 1"):
image_ops.adjust_contrast(x_np, [2.0])
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
with self.cached_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, tol)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat32(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat16(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
mn = np.mean(x)
std = np.std(x)
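    # floor the stddev at 1/sqrt(num_pixels) so uniform images do not divide by ~0,
    # matching per_image_standardization's adjusted stddev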
stddev = max(std, 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
@parameterized.named_parameters([("_int8", np.int8), ("_int16", np.int16),
("_int32", np.int32), ("_int64", np.int64),
("_uint8", np.uint8), ("_uint16", np.uint16),
("_uint32", np.uint32),
("_uint64", np.uint64),
("_float32", np.float32)])
def testBasic(self, data_type):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=data_type).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.cached_session():
x = constant_op.constant(x_np, dtype=data_type, shape=x_shape)
y = image_ops.per_image_standardization(x)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.cached_session():
whiten_np = self.evaluate(whiten)
self.assertFalse(np.any(np.isnan(whiten_np)))
def testBatchWhitening(self):
imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
with self.cached_session():
imgs = constant_op.constant(imgs_np)
whiten = image_ops.per_image_standardization(imgs)
whiten_tf = self.evaluate(whiten)
for w_tf, w_np in zip(whiten_tf, whiten_np):
self.assertAllClose(w_tf, w_np, atol=1e-4)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
with self.cached_session():
return self.evaluate(y)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against the generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
test_config = (
([-1, 0, 3, 3], "offset_height must be >= 0"),
([0, -1, 3, 3], "offset_width must be >= 0"),
([0, 0, 0, 3], "target_height must be > 0"),
([0, 0, 3, 0], "target_width must be > 0"),
([2, 0, 3, 3], r"height must be >= target \+ offset"),
([0, 2, 3, 3], r"width must be >= target \+ offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testCropping2(self):
# Test case for GitHub issue 10315.
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y_tf = self.evaluate(image_ops.central_crop(x_np, 0.33))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
# Test no-op fraction=0.5, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
# Test no-op fraction=0.5, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
def testCentralFractionTensor(self):
# Test case for GitHub issue 45324.
x_shape = [240, 320, 3]
y_shape = [80, 106, 3]
@def_function.function(autograph=False)
def f(x, central_fraction):
return image_ops.central_crop(x, central_fraction)
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
y_tf = self.evaluate(f(x_np, constant_op.constant(0.33)))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
class PadToBoundingBoxTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
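# Helper: optionally converts every argument to a tensor, then evaluates
# pad_to_bounding_box inside a tf.function and returns the result.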
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def pad_bbox(*args):
return image_ops.pad_to_bounding_box(*args)
with self.cached_session():
return self.evaluate(pad_bbox(x_tensor, offset_height, offset_width,
target_height, target_width))
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
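# Runs _PadToBoundingBox for each requested input mode (Python values and/or
# tensors) and compares the padded result against the expected y.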
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
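# Expects _PadToBoundingBox to fail with err_msg, raised either as a
# ValueError or an errors.InvalidArgumentError.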
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
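# Builds a placeholder of shape pre_shape, pads to (height, width) at offset
# (0, 0), and checks the statically inferred output shape.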
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
with self.cached_session():
self.assertAllClose(y, self.evaluate(y_tf))
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
# The original error message does not contain backslashes. However, they
# are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParamsScalarInputs(self):
# In this test, inputs do not get converted to tensors before calling the
# tf.function. The error message here is raised in python
# since the python function has direct access to the scalars.
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = (
(-1, 0, 4, 4,
"offset_height must be >= 0"),
(0, -1, 4, 4,
"offset_width must be >= 0"),
(2, 0, 4, 4,
"height must be <= target - offset"),
(0, 2, 4, 4,
"width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(
x, x_shape, *config_item, use_tensor_inputs_options=[False])
def testBadParamsTensorInputsEager(self):
# In this test inputs get converted to EagerTensors before calling the
# tf.function. The error message here is raised in python
# since the python function has direct access to the tensor's values.
with context.eager_mode():
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = (
(-1, 0, 4, 4,
"offset_height must be >= 0"),
(0, -1, 4, 4,
"offset_width must be >= 0"),
(2, 0, 4, 4,
"height must be <= target - offset"),
(0, 2, 4, 4,
"width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(
x, x_shape, *config_item, use_tensor_inputs_options=[True])
@parameterized.named_parameters([("OffsetHeight", (-1, 0, 4, 4)),
("OffsetWidth", (0, -1, 4, 4)),
("Height", (2, 0, 4, 4)),
("Width", (0, 2, 4, 4))])
def testBadParamsTensorInputsGraph(self, config):
# In this test inputs get converted to tensors before calling the
# tf.function. The error message here is raised during shape inference.
with context.graph_mode():
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
self._assertRaises(
x,
x_shape,
*config,
"Paddings must be non-negative",
use_tensor_inputs_options=[True])
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
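# Samples distorted bounding boxes many times (with min_object_covered given
# both as a Python float and as a tensor) and collects aspect ratios, area
# ratios, and object coverage for the statistical checks below.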
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.cached_session():
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Repeat the sampling with min_object_covered passed as a tensor.
min_object_covered_t = ops.convert_to_tensor(min_object_covered)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered_t,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
# a stricter bound. Or, ideally, the sample_distorted_bounding_box Op should
# be fixed to not use rejection sampling and generate correctly uniform
# aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
# For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
# min_object_covered defaults to 0.1 when not provided.
with self.cached_session():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
def _testStatelessSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered,
aspect_ratio_range, area_range):
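# Stateless variant: for a fixed seed the sampled crop must be identical
# across iterations, so each collected statistic should contain exactly one
# distinct value.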
with test_util.use_gpu():
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
iterations = 2
test_seeds = [(1, 2), (3, 4), (5, 6)]
for seed in test_seeds:
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
for _ in range(iterations):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(bounding_box_np,
dtype=dtypes.float32,
shape=bounding_box_np.shape)
begin, size, _ = image_ops.stateless_sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
seed=seed,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratio = area / original_area
area_ratios.append(area_ratio)
fraction_object_covered.append(
float(np.sum(y_tf)) / bounding_box_area)
# Check that `area_ratio` is within valid range.
self.assertLessEqual(area_ratio, area_range[1])
self.assertGreaterEqual(area_ratio, area_range[0])
# Each array should consist of a single value repeated `iterations` times
# because the same seed is used.
self.assertEqual(len(set(aspect_ratios)), 1)
self.assertEqual(len(set(area_ratios)), 1)
self.assertEqual(len(set(fraction_object_covered)), 1)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testWholeImageBoundingBoxStateless(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
for min_obj_covered in [0.1, constant_op.constant(0.1)]:
self._testStatelessSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=min_obj_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testWithBoundingBoxStateless(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
# Test both scalar and tensor input for `min_object_covered`.
for min_obj_covered in [0.1, constant_op.constant(0.1)]:
self._testStatelessSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_obj_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShapeStateless(self):
with test_util.use_gpu():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
bbox_func = functools.partial(
image_ops.stateless_sample_distorted_bounding_box,
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Check error is raised with wrong seed shapes.
for seed in [1, (1, 2, 3)]:
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
begin, end, bbox_for_drawing = bbox_func(seed=seed)
test_seed = (1, 2)
begin, end, bbox_for_drawing = bbox_func(seed=test_seed)
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
self.assertAllEqual([3], begin.shape)
self.assertAllEqual([3], end.shape)
self.assertAllEqual([1, 1, 4], bbox_for_drawing.shape)
class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
]
# Some resize methods, such as Gaussian, are non-interpolating in that they
# change the image even if there is no scale change. For some tests, we only
# check the values for the value-preserving methods.
INTERPOLATING_METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
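# Used by testResizeDown to decide whether to exercise a method/dtype pair on
# GPU; only NEAREST_NEIGHBOR with float32/float64 qualifies.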
if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing a single image must also leave the shape unchanged.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images_v2(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing a single image must also leave the shape unchanged.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethod.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2],
[
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images_v2(image, target_shape, method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images_v2(image, target_shape, target_method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(
image, [target_height, target_width], method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
87.5, 100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.LANCZOS3] = [
75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
]
expected_data[image_ops.ResizeMethod.LANCZOS5] = [
77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
]
expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
]
expected_data[image_ops.ResizeMethod.BICUBIC] = [
70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
]
expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
]
for nptype in self.TYPES:
for method in expected_data:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-04)
# XLA doesn't implement half_pixel_centers
@test_util.disable_xla("b/127616992")
def testLegacyBicubicMethodsMatchNewMethods(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
(gen_image_ops.resize_bicubic, "keyscubic"))
for legacy_method, new_method in methods_to_test:
with self.cached_session():
img_np = np.array(data, dtype=np.float32).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
legacy_result = legacy_method(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
half_pixel_centers=True)
scale = (
constant_op.constant([target_height, target_width],
dtype=dtypes.float32) /
math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
new_result = gen_image_ops.scale_and_translate(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
scale,
array_ops.zeros([2]),
kernel_type=new_method,
antialias=False)
self.assertAllClose(
self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session():
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testBfloat16MultipleOps(self):
target_height = 8
target_width = 12
img = np.random.uniform(0, 100, size=(30, 10, 2)).astype(np.float32)
img_bf16 = ops.convert_to_tensor(img, dtype="bfloat16")
new_size = constant_op.constant([target_height, target_width])
img_methods = [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA
]
for method in img_methods:
out_op_bf16 = image_ops.resize_images_v2(img_bf16, new_size, method)
out_op_f32 = image_ops.resize_images_v2(img, new_size, method)
bf16_val = self.evaluate(out_op_bf16)
f32_val = self.evaluate(out_op_f32)
self.assertAllClose(bf16_val, f32_val, rtol=1e-2, atol=1e-2)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
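# Helper: resizes x to a (max_h, max_w) target with the given
# preserve_aspect_ratio setting, optionally passing the inputs as tensors.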
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = (max_h, max_w)
x_tensor = x
def resize_func(t,
target_max=target_max,
preserve_aspect_ratio=preserve_aspect_ratio):
return image_ops.resize_images(
t, ops.convert_to_tensor(target_max),
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session():
return self.evaluate(resize_func(x_tensor))
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 80, 10]
x = np.random.uniform(size=x_shape)
for preserve_aspect_ratio in [True, False]:
with self.subTest(preserve_aspect_ratio=preserve_aspect_ratio):
expect_shape = [10, 250, 200, 10] if preserve_aspect_ratio \
else [10, 250, 250, 10]
self._assertResizeCheckShape(
x,
x_shape, [250, 250],
expect_shape,
preserve_aspect_ratio=preserve_aspect_ratio)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImagesTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
METHODS = [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
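# Same GPU gating as in ResizeImagesV2Test: only NEAREST_NEIGHBOR with
# float32/float64 runs on GPU in testResizeDown.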
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing a single image must also leave the shape unchanged.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing a single image must also leave the shape unchanged.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethodV1.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2], [
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, method)
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images(image, target_shape, target_method)
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=False)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
img_shape = [1, 3, 2, 1]
data = [6, 3, 3, 6, 6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
6.5, 7.5, 6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
9.0, 9.0, 6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethodV1.AREA] = [
6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
3.0, 6.0, 6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=True)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [
128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [
128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.BICUBIC)
resized = self.evaluate(y)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
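# Explanatory note: the AREA method averages the input pixels covered by each
# output pixel's footprint; for this 6x6 -> 4x4 downscale every output cell
# covers a 1.5 x 1.5 block of input pixels, which yields the averaged
# expected values below.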
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session():
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.BILINEAR,
align_corners=align_corners)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
img_shape = [1, 3, 2, 1]
with self.cached_session():
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = [max_h, max_w]
x_tensor = x
y = image_ops.resize_images(
x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session():
return self.evaluate(y)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
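# Explanatory note: with preserve_aspect_ratio=True the target size acts as a
# bounding box; the image is scaled by min(target_height / height,
# target_width / width), e.g. a 100x100 image with target [75, 50] becomes
# 50x50, as the shape checks below verify.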
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
with self.cached_session():
return self.evaluate(
image_ops.resize_image_with_pad_v1(x_tensor, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v1(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test with 3-D tensors.
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
# Test with 4-D tensors.
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, None, None, None], 55, 66,
[5, 55, 66, None])
self._assertShapeInference([None, None, None, None], 55, 66,
[None, 55, 66, None])
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
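# Explanatory note: resize_image_with_pad first resizes the image so it fits
# inside the target size while preserving its aspect ratio, then centers it
# with zero padding; e.g. a [2, 4] input resized to [1, 4] becomes a [1, 2]
# image padded to [0, v0, v1, 0], as the cases below show.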
def testPad(self):
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 3, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# half_pixel_centers not supported by XLA
@test_util.for_all_test_methods(test_util.disable_xla, "b/127616992")
class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
with self.cached_session():
return self.evaluate(
image_ops.resize_image_with_pad_v2(x_tensor, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v2(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test with 3-D tensors.
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
# Test with 4-D tensors.
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, None, None, None], 55, 66,
[5, 55, 66, None])
self._assertShapeInference([None, None, None, None], 55, 66,
[None, 55, 66, None])
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
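# Explanatory note: the v2 op resizes with half-pixel centers before zero
# padding, so for this [2, 4] -> [1, 2] downscale each output sample averages
# a 2x2 input block, e.g. (1 + 2 + 5 + 6) / 4 = 3.5 and
# (3 + 4 + 7 + 8) / 4 = 5.5.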
def testPad(self):
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 3.5, 5.5, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def resize_crop_or_pad(*args):
return image_ops.resize_image_with_crop_or_pad(*args)
with self.cached_session():
return self.evaluate(
resize_crop_or_pad(x_tensor, target_height, target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
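# Explanatory note: resize_image_with_crop_or_pad centers the original
# content in the target frame; dimensions that grow are padded with zeros
# (the extra pixel going to the far side when the difference is odd) and
# dimensions that shrink are center cropped.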
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3, 6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4, 8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [3, 4, 5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
x_shape = [8, 2, 1]
y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 2, 3, 6, 7, 0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0, 0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
# The original error message does not contain backslashes. However, they
# are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"inner 3 dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def simple_color_ramp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session():
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.cached_session():
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = self.evaluate([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
with self.cached_session() as sess:
# Read a real jpeg, then decode and crop it in two different ways.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
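# Each crop window is [crop_y, crop_x, crop_height, crop_width] in pixels,
# as unpacked below.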
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window, channels=3)
# Combined decode+crop should have the same shape inference on image
# sizes.
image1_shape = image1_crop.get_shape().as_list()
image2_shape = image2.get_shape().as_list()
self.assertAllEqual(image1_shape, image2_shape)
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = self.evaluate([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
def testCropAndDecodeJpegWithInvalidCropWindow(self):
with self.cached_session() as sess:
# Read a real jpeg; all of the crop windows below are invalid.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Invalid JPEG data or crop window"):
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
self.evaluate(result)
def testSynthetic(self):
with self.cached_session():
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.cached_session():
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.cached_session():
# Compare decoding with both dct_option=INTEGER_FAST and
# default. They should be the same.
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = self.evaluate([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session():
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
self.assertAllEqual(image_shape, [256, 128, 3])
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.cached_session():
jpeg = io_ops.read_file(path)
image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
# Cmyk jpeg image has 4 channels.
self.assertAllEqual(image_shape, [256, 128, 4])
def testRandomJpegQuality(self):
# A previous implementation of random_jpeg_quality had a bug. This unit
# test exercises the fixed version; because of forward compatibility, it
# can only run when the fixed version is in use.
# Test jpeg quality dynamic randomization.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
with self.cached_session() as sess:
# Test randomization.
random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
are_images_equal = []
for i in range(1, len(random_jpeg_images)):
# Most of them should be different if randomization is occurring
# correctly.
are_images_equal.append(
np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))
self.assertFalse(all(are_images_equal))
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testStatelessRandomJpegQuality(self):
# Test deterministic randomness in jpeg quality by checking that the same
# sequence of jpeg quality adjustments is returned each round given the
# same seed.
with test_util.use_gpu():
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
jpeg_quality = (40, 100)
seeds_list = [(1, 2), (3, 4)]
iterations = 2
random_jpeg_images_all = [[] for _ in range(iterations)]
for random_jpeg_images in random_jpeg_images_all:
for seed in seeds_list:
distorted_jpeg = image_ops.stateless_random_jpeg_quality(
image, jpeg_quality[0], jpeg_quality[1], seed=seed)
# Verify that the random jpeg image is different from the original
# jpeg image.
self.assertNotAllEqual(image, distorted_jpeg)
random_jpeg_images.append(self.evaluate(distorted_jpeg))
# Verify that the results are identical given the same seed.
for i in range(1, iterations):
self.assertAllEqual(random_jpeg_images_all[0],
random_jpeg_images_all[i])
def testAdjustJpegQuality(self):
# Test that image_ops.adjust_jpeg_quality works when the jpeg quality
# is an int (not a tensor), for backward compatibility.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
jpeg_quality = np.random.randint(40, 100)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(
image, jpeg_quality)
with self.cached_session() as sess:
sess.run(adjust_jpeg_quality_image)
def testAdjustJpegQualityShape(self):
with self.cached_session():
image = constant_op.constant(
np.arange(24, dtype=np.uint8).reshape([2, 4, 3]))
adjusted_image = image_ops.adjust_jpeg_quality(image, 80)
adjusted_image.shape.assert_is_compatible_with([None, None, 3])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.cached_session():
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = self.evaluate([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, self.evaluate(image1))
def testSynthetic(self):
with self.cached_session():
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.cached_session():
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.cached_session():
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.cached_session():
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def _testValid(self, filename):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
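# Ground truth: each of the 12 frames is black except for a STRIDE-pixel
# band of 255s that sweeps across the columns first and, once it passes the
# image width, across the rows (see the gt construction below).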
with self.cached_session():
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = self.evaluate([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testValid(self):
self._testValid("scan.gif")
self._testValid("optimized.gif")
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
def testAnimatedGif(self):
# Test that all frames in the animated GIF file are properly decoded.
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif = io_ops.read_file(os.path.join(base, "pendulum_sm.gif"))
gt_frame0 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame0.png"))
gt_frame1 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame1.png"))
gt_frame2 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame2.png"))
image = image_ops.decode_gif(gif)
frame0 = image_ops.decode_png(gt_frame0)
frame1 = image_ops.decode_png(gt_frame1)
frame2 = image_ops.decode_png(gt_frame2)
image, frame0, frame1, frame2 = self.evaluate([image, frame0, frame1,
frame2])
# Compare decoded gif frames with ground-truth data.
self.assertAllEqual(image[0], frame0)
self.assertAllEqual(image[1], frame1)
self.assertAllEqual(image[2], frame2)
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.cached_session():
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y, y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate, y_np, atol=1e-5)
def testNoConvert(self):
# Tests with Tensor.op requires a graph.
with ops.Graph().as_default():
# Make sure converting to the same data type creates only an identity op
with self.cached_session():
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEqual(y.op.type, "Identity")
self.assertEqual(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
# Make sure converting between integer types scales appropriately
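# The scale factor is the ratio of (dtype max + 1) values, e.g. uint8 ->
# int16 multiplies by 32768 // 256 == 128 and int64 -> int32 divides by
# 2**32, consistent with the expected values below.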
with self.cached_session():
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
def testConvertBetweenFloat(self):
# Make sure converting between float types does nothing interesting
with self.cached_session():
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.cached_session():
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.cached_session():
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be floating-point numbers, so we only test
for approximate equality within some narrow error bound.
"""
# Create a TensorFlow session.
with self.cached_session():
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = self.evaluate(y)
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum for all axis.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum for the last 3 axis.
sum_axis = (1, 2, 3)
else:
# This should not occur in this test-code.
raise ValueError("x_np must have 3 or 4 dimensions, got %d" % dim)
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
# TODO(b/133851381): re-enable this test.
def disabledtestTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2], [4, 7]]
# Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18], [29, 47]]
# Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120], [193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var) # pylint: disable=invalid-unary-operand-type
# Scale the pixel-values by a float. This scales the total variation as
# well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
def testFormats(self):
prefix = "tensorflow/core/lib"
paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
"gif/testdata/lena.gif")
decoders = {
"jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
"png": functools.partial(image_ops.decode_png, channels=3),
"gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
}
with self.cached_session():
for path in paths:
contents = self.evaluate(io_ops.read_file(os.path.join(prefix, path)))
images = {}
for name, decode in decoders.items():
image = self.evaluate(decode(contents))
self.assertEqual(image.ndim, 3)
for prev_name, prev in images.items():
print("path %s, names %s %s, shapes %s %s" %
(path, name, prev_name, image.shape, prev.shape))
self.assertAllEqual(image, prev)
images[name] = image
def testError(self):
path = "tensorflow/core/lib/gif/testdata/scan.gif"
with self.cached_session():
for decode in image_ops.decode_jpeg, image_ops.decode_png:
with self.assertRaisesOpError(r"Got 12 frames"):
decode(io_ops.read_file(path)).eval()
class CombinedNonMaxSuppressionTest(test_util.TensorFlowTestCase):
# NOTE(b/142795960): parameterized tests do not work well with tf.tensor
# inputs. Because of those failures, there is a separate test
# `testInvalidTensorInput` which is identical to this one except that the
# input here is a scalar as opposed to a tensor.
def testInvalidPyInput(self):
boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
max_output_size_per_class = 5
max_total_size = 2**31
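# 2**31 does not fit in int32, so converting it to the op's expected int32
# max_total_size input must fail with the error matched below.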
with self.assertRaisesRegex(
(TypeError, ValueError),
"type int64 that does not match expected type of int32|"
"Tensor conversion requested dtype int32 for Tensor with dtype int64"):
image_ops.combined_non_max_suppression(
boxes=boxes_np,
scores=scores_np,
max_output_size_per_class=max_output_size_per_class,
max_total_size=max_total_size)
# NOTE(b/142795960): parameterized tests do not work well with tf.tensor
# inputs. Because of those failures, this separate test is identical to
# `testInvalidPyInput` except that the input here is a tensor as opposed
# to a scalar.
def testInvalidTensorInput(self):
boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
max_output_size_per_class = 5
max_total_size = ops.convert_to_tensor(2**31)
with self.assertRaisesRegex(
(TypeError, ValueError),
"type int64 that does not match expected type of int32|"
"Tensor conversion requested dtype int32 for Tensor with dtype int64"):
image_ops.combined_non_max_suppression(
boxes=boxes_np,
scores=scores_np,
max_output_size_per_class=max_output_size_per_class,
max_total_size=max_total_size)
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
def testNonMaxSuppression(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices, [3, 0, 5])
def testInvalidShape(self):
def nms_func(box, score, iou_thres, score_thres):
return image_ops.non_max_suppression(box, score, iou_thres, score_thres)
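# Note: non_max_suppression's third and fourth positional arguments are
# max_output_size and iou_threshold, so below iou_thres (3) is actually used
# as the max_output_size and score_thres (0.5) as the iou_threshold.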
iou_thres = 3
score_thres = 0.5
# The boxes should be 2D of shape [num_boxes, 4].
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Shape must be rank 2 but is rank 1"):
boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, iou_thres, score_thres)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Dimension must be 4 but is 3"):
boxes = constant_op.constant([[0.0, 0.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, iou_thres, score_thres)
# The boxes are of shape [num_boxes, 4] and the scores of shape
# [num_boxes], so the mismatched lengths raise an error.
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Dimensions must be equal, but are 1 and 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9, 0.75])
nms_func(boxes, scores, iou_thres, score_thres)
# The scores should be 1D of shape [num_boxes].
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Shape must be rank 1 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([[0.9]])
nms_func(boxes, scores, iou_thres, score_thres)
# The max_output_size should be a scalar (0-D).
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Shape must be rank 0 but is rank 1"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, [iou_thres], score_thres)
# The iou_threshold should be a scalar (0-D).
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Shape must be rank 0 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, iou_thres, [[score_thres]])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testTensors(self):
with context.eager_mode():
boxes_tensor = constant_op.constant([[6.625, 6.688, 272., 158.5],
[6.625, 6.75, 270.5, 158.4],
[5.375, 5., 272., 157.5]])
scores_tensor = constant_op.constant([0.84, 0.7944, 0.7715])
max_output_size = 100
iou_threshold = 0.5
score_threshold = 0.3
soft_nms_sigma = 0.25
pad_to_max_output_size = False
# gen_image_ops.non_max_suppression_v5.
for dtype in [np.float16, np.float32]:
boxes = math_ops.cast(boxes_tensor, dtype=dtype)
scores = math_ops.cast(scores_tensor, dtype=dtype)
_, _, num_selected = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma, pad_to_max_output_size)
self.assertEqual(num_selected.numpy(), 1)
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testDataTypes(self):
# Test case for GitHub issue 20199.
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = float("-inf")
# Note: There are multiple versions of non_max_suppression v2, v3, v4.
# gen_image_ops.non_max_suppression_v2:
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
selected_indices = gen_image_ops.non_max_suppression_v2(
boxes, scores, max_output_size, iou_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v3
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
selected_indices = gen_image_ops.non_max_suppression_v3(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v4.
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
selected_indices, _ = gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v5.
soft_nms_sigma_np = float(0.0)
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)
selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
def testZeroIOUThreshold(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [1., 1., 1., 1., 1., 1.]
max_output_size_np = 3
iou_threshold_np = 0.0
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices, [0, 3, 5])
class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersWithSoftNMS(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 6
iou_threshold_np = 0.5
score_threshold_np = 0.0
soft_nms_sigma_np = 0.5
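# With a positive soft_nms_sigma, overlapping boxes are not discarded;
# their scores are decayed (Soft-NMS), which is why all six boxes are
# returned below with progressively reduced scores.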
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np)
selected_indices, selected_scores = \
image_ops.non_max_suppression_with_scores(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold,
soft_nms_sigma)
selected_indices, selected_scores = self.evaluate(
[selected_indices, selected_scores])
self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])
self.assertAllClose(selected_scores,
[0.95, 0.9, 0.384, 0.3, 0.256, 0.197],
rtol=1e-2, atol=1e-2)
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold):
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
yp, nvp = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
y, n = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(yp.shape.is_fully_defined(), True)
self.assertEqual(y.shape.is_fully_defined(), False)
return yp, nvp, y, n
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
selected_indices_padded, num_valid_padded, selected_indices, num_valid = \
func(boxes_np, scores_np, max_output_size_np, iou_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(self.evaluate(num_valid_padded), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(self.evaluate(num_valid), 3)
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold, score_threshold):
boxes = constant_op.constant(boxes)
scores = constant_op.constant(scores)
max_output_size = constant_op.constant(max_output_size)
iou_threshold = constant_op.constant(iou_threshold)
score_threshold = constant_op.constant(score_threshold)
y, nv = image_ops.non_max_suppression_padded(
boxes, scores, max_output_size, iou_threshold, score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(y.shape.is_fully_defined(), False)
return y, nv
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
selected_indices, num_valid = func(boxes_np, scores_np,
max_output_size_np, iou_threshold_np,
score_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(self.evaluate(num_valid), 3)
def testInvalidDtype(self):
boxes_np = [[4.0, 6.0, 3.0, 6.0],
[2.0, 1.0, 5.0, 4.0],
[9.0, 0.0, 9.0, 9.0]]
scores = [5.0, 6.0, 5.0]
max_output_size = 2**31
with self.assertRaisesRegex(
(TypeError, ValueError), "type int64 that does not match type int32"):
boxes = constant_op.constant(boxes_np)
image_ops.non_max_suppression_padded(boxes, scores, max_output_size)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
def testSelectOneFromThree(self):
overlaps_np = [
[1.0, 0.7, 0.2],
[0.7, 1.0, 0.0],
[0.2, 0.0, 1.0],
]
scores_np = [0.7, 0.9, 0.1]
max_output_size_np = 3
overlaps = constant_op.constant(overlaps_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
overlap_threshold = 0.6
score_threshold = 0.4
selected_indices = image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices, [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
"""Tests utility function used by ssim() and psnr()."""
def testWrongDims(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
img = array_ops.placeholder(dtype=dtypes.float32)
img_np = np.array((2, 2))
with self.cached_session() as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img: img_np})
def testShapeMismatch(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
img1 = array_ops.placeholder(dtype=dtypes.float32)
img2 = array_ops.placeholder(dtype=dtypes.float32)
img1_np = np.array([1, 2, 2, 1])
img2_np = np.array([1, 3, 3, 1])
with self.cached_session() as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(
img1, img2)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img1: img1_np, img2: img2_np})
class PSNRTest(test_util.TensorFlowTestCase):
"""Tests for PSNR."""
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/psnr/testdata", filename))
im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
q20 = self._LoadTestImage(sess, "cat_q20.jpg")
q72 = self._LoadTestImage(sess, "cat_q72.jpg")
q95 = self._LoadTestImage(sess, "cat_q95.jpg")
return q20, q72, q95
def _PSNR_NumPy(self, orig, target, max_value):
"""Numpy implementation of PSNR."""
mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
return 20 * np.log10(max_value) - 10 * np.log10(mse)
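# Note (illustrative, not part of the original test): the expression above is
# the usual PSNR definition 10 * log10(MAX^2 / MSE) rewritten with log rules,
# since 10 * log10(MAX^2 / MSE) == 20 * log10(MAX) - 10 * log10(MSE).
# For example, with MAX = 1.0 and MSE = 0.001 both forms give
# 20 * np.log10(1.0) - 10 * np.log10(0.001) == 30.0 dB.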
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testPSNRSingleImage(self):
image1 = self._RandomImage((8, 8, 1), 1)
image2 = self._RandomImage((8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session():
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testPSNRMultiImage(self):
image1 = self._RandomImage((10, 8, 8, 1), 1)
image2 = self._RandomImage((10, 8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session():
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testGoldenPSNR(self):
q20, q72, q95 = self._LoadTestImages()
# Verify NumPy implementation first.
# Golden values are generated using GNU Octave's psnr() function.
psnr1 = self._PSNR_NumPy(q20, q72, 1)
self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
psnr2 = self._PSNR_NumPy(q20, q95, 1)
self.assertNear(29.994, psnr2, 0.001)
psnr3 = self._PSNR_NumPy(q72, q95, 1)
self.assertNear(35.302, psnr3, 0.001)
# Test TensorFlow implementation.
with self.cached_session():
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
tf_psnr1 = self.evaluate(image_ops.psnr(tf_q20, tf_q72, 1, "psnr1"))
tf_psnr2 = self.evaluate(image_ops.psnr(tf_q20, tf_q95, 1, "psnr2"))
tf_psnr3 = self.evaluate(image_ops.psnr(tf_q72, tf_q95, 1, "psnr3"))
self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
def testInfinity(self):
q20, _, _ = self._LoadTestImages()
psnr = self._PSNR_NumPy(q20, q20, 1)
with self.cached_session():
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_q20, tf_q20, 1, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testInt(self):
img1 = self._RandomImage((10, 8, 8, 1), 255)
img2 = self._RandomImage((10, 8, 8, 1), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
psnr_uint8 = image_ops.psnr(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
psnr_float32 = image_ops.psnr(img1, img2, 1.0)
with self.cached_session():
self.assertAllClose(
self.evaluate(psnr_uint8), self.evaluate(psnr_float32), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBatchNumpyInputs(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
with self.cached_session():
img1 = self.evaluate(constant_op.constant(img1))
img2 = self.evaluate(constant_op.constant(img2))
ssim = image_ops.ssim(
img1,
img2,
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertLess(self.evaluate(ssim), 0)
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim_multiscale(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
@def_function.function
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testUnweightedIsDifferentiableEager(self):
if not context.executing_eagerly():
self.skipTest("Eager mode only")
img = self._LoadTestImages()
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
If any of the values is negative, so that the geometric mean is not
well-defined, the MS-SSIM score is treated as zero.
"""
with self.cached_session() as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [
image_ops.ssim_multiscale(
x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
for x, y in itertools.combinations(images, 2)
]
msssim = self.evaluate(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
def testNumpyInput(self):
"""Test case for GitHub issue 28241."""
image = np.random.random([512, 512, 1])
score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
with self.cached_session():
_ = self.evaluate(score_tensor)
class ImageGradientsTest(test_util.TensorFlowTestCase):
def testImageGradients(self):
shape = [1, 2, 4, 1]
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
img = array_ops.reshape(img, shape)
expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)
dy, dx = image_ops.image_gradients(img)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsMultiChannelBatch(self):
batch = [[[[1, 2], [2, 5], [3, 3]],
[[8, 4], [5, 1], [9, 8]]],
[[[5, 3], [7, 9], [1, 6]],
[[1, 2], [6, 3], [6, 3]]]]
expected_dy = [[[[7, 2], [3, -4], [6, 5]],
[[0, 0], [0, 0], [0, 0]]],
[[[-4, -1], [-1, -6], [5, -3]],
[[0, 0], [0, 0], [0, 0]]]]
expected_dx = [[[[1, 3], [1, -2], [0, 0]],
[[-3, -3], [4, 7], [0, 0]]],
[[[2, 6], [-6, -3], [0, 0]],
[[5, 1], [0, 0], [0, 0]]]]
batch = constant_op.constant(batch)
assert batch.get_shape().as_list() == [2, 2, 3, 2]
dy, dx = image_ops.image_gradients(batch)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsBadShape(self):
# [2 x 4] image but missing batch and depth dimensions.
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
with self.assertRaises(ValueError):
image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
def disabled_testSobelEdges1x2x3x1(self):
img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
dtype=dtypes.float32, shape=[1, 2, 3, 1])
expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
[[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
sobel = image_ops.sobel_edges(img)
with self.cached_session():
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected, actual_sobel)
def testSobelEdges5x3x4x2(self):
batch_size = 5
plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
[1, 3, 4, 1])
two_channel = np.concatenate([plane, plane], axis=3)
batch = np.concatenate([two_channel] * batch_size, axis=0)
img = constant_op.constant(batch, dtype=dtypes.float32,
shape=[batch_size, 3, 4, 2])
expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
[[6, 0], [0, 6], [-6, 10], [-6, 0]],
[[0, 0], [0, 0], [0, 10], [0, 0]]],
[1, 3, 4, 1, 2])
expected_two_channel = np.concatenate(
[expected_plane, expected_plane], axis=3)
expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
sobel = image_ops.sobel_edges(img)
with self.cached_session():
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected_batch, actual_sobel)
@test_util.run_all_in_graph_and_eager_modes
class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
_FORWARD_COMPATIBILITY_HORIZONS = [
(2020, 1, 1),
(2020, 7, 14),
(2525, 1, 1), # future behavior
]
def testBmpChannels(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with test_util.use_gpu():
base = "tensorflow/core/lib/bmp/testdata"
# `rgba_small.bmp` has 4 channels with transparent pixels.
# Test consistency between `decode_image` and `decode_bmp` functions.
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
image0 = image_ops.decode_image(bmp0, channels=4)
image1 = image_ops.decode_bmp(bmp0, channels=4)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
# Test that 3 channels are returned when the user requests `channels=3`
# even though the image has 4 channels.
# Note that this operation simply drops 4th channel information. This
# is the same behavior as `decode_png`.
# e.g. pixel values [25, 25, 25, 100] becomes [25, 25, 25].
bmp1 = io_ops.read_file(os.path.join(base, "rgb_small.bmp"))
image2 = image_ops.decode_bmp(bmp0, channels=3)
image3 = image_ops.decode_bmp(bmp1)
image2, image3 = self.evaluate([image2, image3])
self.assertAllEqual(image2, image3)
# Test that 4 channels are returned when the user requests `channels=4`
# even though the image has 3 channels. The alpha channel should be set
# to UINT8_MAX.
bmp3 = io_ops.read_file(os.path.join(base, "rgb_small_255.bmp"))
bmp4 = io_ops.read_file(os.path.join(base, "rgba_small_255.bmp"))
image4 = image_ops.decode_bmp(bmp3, channels=4)
image5 = image_ops.decode_bmp(bmp4)
image4, image5 = self.evaluate([image4, image5])
self.assertAllEqual(image4, image5)
# Test that 3 channels are returned when the user requests `channels=3`
# even though the image has 1 channel (grayscale).
bmp6 = io_ops.read_file(os.path.join(base, "grayscale_small.bmp"))
bmp7 = io_ops.read_file(
os.path.join(base, "grayscale_small_3channels.bmp"))
image6 = image_ops.decode_bmp(bmp6, channels=3)
image7 = image_ops.decode_bmp(bmp7)
image6, image7 = self.evaluate([image6, image7])
self.assertAllEqual(image6, image7)
# Test that 4 channels are returned when the user requests `channels=4`
# even though the image has 1 channel (grayscale). The alpha channel
# should be set to UINT8_MAX.
bmp9 = io_ops.read_file(
os.path.join(base, "grayscale_small_4channels.bmp"))
image8 = image_ops.decode_bmp(bmp6, channels=4)
image9 = image_ops.decode_bmp(bmp9)
image8, image9 = self.evaluate([image8, image9])
self.assertAllEqual(image8, image9)
def testJpegUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
# NumPy round-trip: encode a uint16 array to PNG and decode it back unchanged.
x = np.random.randint(256, size=(4, 4, 3), dtype=np.uint16)
x_str = image_ops_impl.encode_png(x)
x_dec = image_ops_impl.decode_image(
x_str, channels=3, dtype=dtypes.uint16)
self.assertAllEqual(x, x_dec)
def testGifUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testExpandAnimations(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
# Test `expand_animations=False` case.
image0 = image_ops.decode_image(
gif0, dtype=dtypes.float32, expand_animations=False)
# image_ops.decode_png() handles GIFs and returns 3D tensors
animation = image_ops.decode_gif(gif0)
first_frame = array_ops.gather(animation, 0)
image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertLen(image0.shape, 3)
self.assertAllEqual(list(image0.shape), [40, 20, 3])
self.assertAllEqual(image0, image1)
# Test `expand_animations=True` case.
image2 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image3 = image_ops.convert_image_dtype(animation, dtypes.float32)
image2, image3 = self.evaluate([image2, image3])
self.assertLen(image2.shape, 4)
self.assertAllEqual(list(image2.shape), [12, 40, 20, 3])
self.assertAllEqual(image2, image3)
def testImageCropAndResize(self):
if test_util.is_gpu_available():
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
else:
message = "Boxes contains at least one element that is not finite"
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
message):
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
@parameterized.named_parameters(
("_jpeg", "JPEG", "jpeg_merge_test1.jpg"),
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
)
def testWrongOpBmp(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = "Trying to decode " + img_format + " format using DecodeBmp op"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_bmp(img_bytes)
self.evaluate(img)
@parameterized.named_parameters(
("_jpeg", image_ops.decode_jpeg, "DecodeJpeg"),
("_png", image_ops.decode_png, "DecodePng"),
("_gif", image_ops.decode_gif, "DecodeGif"),
)
def testWrongOp(self, decode_op, op_used):
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
err_msg = ("Trying to decode BMP format using a wrong op. Use `decode_bmp` "
"or `decode_image` instead. Op used: ") + op_used
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img = decode_op(bmp0)
self.evaluate(img)
@parameterized.named_parameters(
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
("_bmp", "BMP", "rgba_small.bmp"),
)
def testWrongOpJpeg(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = ("DecodeAndCropJpeg operation can run on JPEG only, but "
"detected ") + img_format
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_and_crop_jpeg(img_bytes, [1, 1, 2, 2])
self.evaluate(img)
def testGifFramesWithDiffSize(self):
"""Test decoding an animated GIF.
This test verifies that the `decode_image` op can decode animated GIFs whose
first frame does not fill the canvas. The unoccupied areas should be filled
with zeros (black).
`squares.gif` is animated with two images of different sizes. It
alternates between a smaller image of size 10 x 10 and a larger image of
size 16 x 16. Because it starts animating with the smaller image, the first
frame does not fill the canvas. (Canvas size is equal to max frame width x
max frame height.)
`red_black.gif` has just a single image in a GIF format. It is the same
image as the smaller image (size 10 x 10) of the two images in
`squares.gif`. The only difference is that its background (canvas - smaller
image) is pre-filled with zeros (black); it is the groundtruth.
"""
base = "tensorflow/core/lib/gif/testdata"
gif_bytes0 = io_ops.read_file(os.path.join(base, "squares.gif"))
image0 = image_ops.decode_image(gif_bytes0, dtype=dtypes.float32,
expand_animations=False)
gif_bytes1 = io_ops.read_file(os.path.join(base, "red_black.gif"))
image1 = image_ops.decode_image(gif_bytes1, dtype=dtypes.float32)
image1_0 = array_ops.gather(image1, 0)
image0, image1_0 = self.evaluate([image0, image1_0])
self.assertAllEqual(image0, image1_0)
if __name__ == "__main__":
googletest.main()
|
PYSEC-2021-392
|
jupyter_server_proxy/__init__.py
|
@@ -3,6 +3,8 @@
from jupyter_server.utils import url_path_join as ujoin
from .api import ServersInfoHandler, IconHandler
+__version__ = "3.2.3"
+
# Jupyter Extension points
def _jupyter_server_extension_points():
return [{
|
from .handlers import setup_handlers
from .config import ServerProxy as ServerProxyConfig, make_handlers, get_entrypoint_server_processes, make_server_process
from jupyter_server.utils import url_path_join as ujoin
from .api import ServersInfoHandler, IconHandler
# Jupyter Extension points
def _jupyter_server_extension_points():
return [{
'module': 'jupyter_server_proxy',
}]
def _jupyter_nbextension_paths():
return [{
"section": "tree",
"dest": "jupyter_server_proxy",
'src': 'static',
"require": "jupyter_server_proxy/tree"
}]
def _load_jupyter_server_extension(nbapp):
# Set up handlers picked up via config
base_url = nbapp.web_app.settings['base_url']
serverproxy_config = ServerProxyConfig(parent=nbapp)
server_processes = [
make_server_process(name, server_process_config, serverproxy_config)
for name, server_process_config in serverproxy_config.servers.items()
]
server_processes += get_entrypoint_server_processes(serverproxy_config)
server_handlers = make_handlers(base_url, server_processes)
nbapp.web_app.add_handlers('.*', server_handlers)
# Set up default non-server handler
setup_handlers(
nbapp.web_app,
serverproxy_config,
)
icons = {}
for sp in server_processes:
if sp.launcher_entry.enabled and sp.launcher_entry.icon_path:
icons[sp.name] = sp.launcher_entry.icon_path
nbapp.web_app.add_handlers('.*', [
(ujoin(base_url, 'server-proxy/servers-info'), ServersInfoHandler, {'server_processes': server_processes}),
(ujoin(base_url, 'server-proxy/icon/(.*)'), IconHandler, {'icons': icons}),
])
# For backward compatibility
load_jupyter_server_extension = _load_jupyter_server_extension
_jupyter_server_extension_paths = _jupyter_server_extension_points
|
GHSA-w3vc-fx9p-wp4v
|
jupyter_server_proxy/handlers.py
|
@@ -124,6 +124,39 @@ def check_origin(self, origin=None):
async def open(self, port, proxied_path):
raise NotImplementedError('Subclasses of ProxyHandler should implement open')
+ async def prepare(self, *args, **kwargs):
+ """
+ Enforce authentication on *all* requests.
+
+ This method is called *before* any other method for all requests.
+ See https://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.prepare.
+ """
+ # Due to https://github.com/jupyter-server/jupyter_server/issues/1012,
+ # we can not decorate `prepare` with `@web.authenticated`.
+ # `super().prepare`, which calls `JupyterHandler.prepare`, *must* be called
+ # before `@web.authenticated` can work. Since `@web.authenticated` is a decorator
+ # that relies on the decorated method to get access to request information, we can
+ # not call it directly. Instead, we create an empty lambda that takes a request_handler,
+ # decorate that with web.authenticated, and call the decorated function.
+ # super().prepare became async with jupyter_server v2
+ _prepared = super().prepare(*args, **kwargs)
+ if _prepared is not None:
+ await _prepared
+
+ # If this is a GET request that wants to be upgraded to a websocket, users not
+ # already authenticated gets a straightforward 403. Everything else is dealt
+ # with by `web.authenticated`, which does a 302 to the appropriate login url.
+ # Websockets are purely API calls made by JS rather than a direct user facing page,
+ # so redirects do not make sense for them.
+ if (
+ self.request.method == "GET"
+ and self.request.headers.get("Upgrade", "").lower() == "websocket"
+ ):
+ if not self.current_user:
+ raise web.HTTPError(403)
+ else:
+ web.authenticated(lambda request_handler: None)(self)
+
async def http_get(self, host, port, proxy_path=''):
'''Our non-websocket GET.'''
raise NotImplementedError('Subclasses of ProxyHandler should implement http_get')
@@ -265,7 +298,6 @@ def _check_host_allowlist(self, host):
else:
return host in self.host_allowlist
- @web.authenticated
async def proxy(self, host, port, proxied_path):
'''
This serverextension handles:
@@ -664,7 +696,6 @@ async def ensure_process(self):
raise
- @web.authenticated
async def proxy(self, port, path):
if not path.startswith('/'):
path = '/' + path
|
"""
Authenticated HTTP proxy for Jupyter Notebooks
Some original inspiration from https://github.com/senko/tornado-proxy
"""
import inspect
import socket
import os
from urllib.parse import urlunparse, urlparse, quote
import aiohttp
from asyncio import Lock
from copy import copy
from tornado import gen, web, httpclient, httputil, process, websocket, ioloop, version_info
from jupyter_server.utils import ensure_async, url_path_join
from jupyter_server.base.handlers import JupyterHandler, utcnow
from traitlets.traitlets import HasTraits
from traitlets import Bytes, Dict, Instance, Integer, Unicode, Union, default, observe
from .utils import call_with_asked_args
from .websocket import WebSocketHandlerMixin, pingable_ws_connect
from simpervisor import SupervisedProcess
class RewritableResponse(HasTraits):
"""
A class to hold the response to be rewritten by rewrite_response
"""
# The following should not be modified (or even accessed) by rewrite_response.
# It is used to initialize the default values of the traits.
orig_response = Instance(klass=httpclient.HTTPResponse)
# The following are modifiable by rewrite_response
headers = Union(trait_types=[Dict(), Instance(klass=httputil.HTTPHeaders)])
body = Bytes()
code = Integer()
reason = Unicode(allow_none=True)
@default('headers')
def _default_headers(self):
return copy(self.orig_response.headers)
@default('body')
def _default_body(self):
return self.orig_response.body
@default('code')
def _default_code(self):
return self.orig_response.code
@default('reason')
def _default_reason(self):
return self.orig_response.reason
@observe('code')
def _observe_code(self, change):
# HTTP status codes are mapped to short descriptions in the
# httputil.responses dictionary, 200 maps to "OK", 403 maps to
# "Forbidden" etc.
#
# If code is updated and it previously had a reason matching its short
# description, we update reason to match the new code's short
# description.
#
if self.reason == httputil.responses.get(change['old'], 'Unknown'):
self.reason = httputil.responses.get(change['new'], 'Unknown')
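# For example (illustrative): if a rewrite function changes code from 200 to
# 403 while reason is still the default "OK", reason is updated to
# "Forbidden"; a custom reason such as "Totally Fine" is left untouched.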
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Trigger the default value to be set from orig_response on instantiation.
# Otherwise _observe_code will receive change['old'] == 0.
self.code
def _apply_to_copy(self, func):
"""
Apply a function to a copy of self, and return the copy
"""
new = copy(self)
func(new)
return new
class AddSlashHandler(JupyterHandler):
"""Add trailing slash to URLs that need them."""
@web.authenticated
def get(self, *args):
src = urlparse(self.request.uri)
dest = src._replace(path=src.path + '/')
self.redirect(urlunparse(dest))
class ProxyHandler(WebSocketHandlerMixin, JupyterHandler):
"""
A tornado request handler that proxies HTTP and websockets from
a given host/port combination. This class is not meant to be
used directly as a means of overriding CORS. This presents significant
security risks, and could allow arbitrary remote code access. Instead, it is
meant to be subclassed and used for proxying URLs from trusted sources.
Subclasses should implement open, http_get, post, put, delete, head, patch,
and options.
"""
def __init__(self, *args, **kwargs):
self.proxy_base = ''
self.absolute_url = kwargs.pop('absolute_url', False)
self.host_allowlist = kwargs.pop('host_allowlist', ['localhost', '127.0.0.1'])
self.rewrite_response = kwargs.pop(
'rewrite_response',
tuple(),
)
self.subprotocols = None
super().__init__(*args, **kwargs)
# Support/use jupyter_server config arguments allow_origin and allow_origin_pat
# to enable cross origin requests propagated by e.g. inverting proxies.
def check_origin(self, origin=None):
return JupyterHandler.check_origin(self, origin)
# Support all the methods that tornado does by default except for GET which
# is passed to WebSocketHandlerMixin and then to WebSocketHandler.
async def open(self, port, proxied_path):
raise NotImplementedError('Subclasses of ProxyHandler should implement open')
async def http_get(self, host, port, proxy_path=''):
'''Our non-websocket GET.'''
raise NotImplementedError('Subclasses of ProxyHandler should implement http_get')
def post(self, host, port, proxy_path=''):
raise NotImplementedError('Subclasses of ProxyHandler should implement this post')
def put(self, port, proxy_path=''):
raise NotImplementedError('Subclasses of ProxyHandler should implement this put')
def delete(self, host, port, proxy_path=''):
raise NotImplementedError('Subclasses of ProxyHandler should implement delete')
def head(self, host, port, proxy_path=''):
raise NotImplementedError('Subclasses of ProxyHandler should implement head')
def patch(self, host, port, proxy_path=''):
raise NotImplementedError('Subclasses of ProxyHandler should implement patch')
def options(self, host, port, proxy_path=''):
raise NotImplementedError('Subclasses of ProxyHandler should implement options')
def on_message(self, message):
"""
Called when we receive a message from our client.
We proxy it to the backend.
"""
self._record_activity()
if hasattr(self, 'ws'):
self.ws.write_message(message, binary=isinstance(message, bytes))
def on_ping(self, data):
"""
Called when the client pings our websocket connection.
We proxy it to the backend.
"""
self.log.debug('jupyter_server_proxy: on_ping: {}'.format(data))
self._record_activity()
if hasattr(self, 'ws'):
self.ws.protocol.write_ping(data)
def on_pong(self, data):
"""
Called when we receive a ping back.
"""
self.log.debug('jupyter_server_proxy: on_pong: {}'.format(data))
def on_close(self):
"""
Called when the client closes our websocket connection.
We close our connection to the backend too.
"""
if hasattr(self, 'ws'):
self.ws.close()
def _record_activity(self):
"""Record proxied activity as API activity
avoids proxied traffic being ignored by the notebook's
internal idle-shutdown mechanism
"""
self.settings['api_last_activity'] = utcnow()
def _get_context_path(self, host, port):
"""
Some applications need to know where they are being proxied from.
This is either:
- {base_url}/proxy/{port}
- {base_url}/proxy/{host}:{port}
- {base_url}/proxy/absolute/{port}
- {base_url}/proxy/absolute/{host}:{port}
- {base_url}/{proxy_base}
"""
host_and_port = str(port) if host == 'localhost' else host + ":" + str(port)
if self.proxy_base:
return url_path_join(self.base_url, self.proxy_base)
if self.absolute_url:
return url_path_join(self.base_url, 'proxy', 'absolute', host_and_port)
else:
return url_path_join(self.base_url, 'proxy', host_and_port)
def get_client_uri(self, protocol, host, port, proxied_path):
if self.absolute_url:
context_path = self._get_context_path(host, port)
client_path = url_path_join(context_path, proxied_path)
else:
client_path = proxied_path
# ensure client_path always starts with '/'
if not client_path.startswith("/"):
client_path = "/" + client_path
# Quote spaces, åäö and such, but only enough to send a valid web
# request onwards. To do this, we mark the RFC 3986 spec's "reserved"
# and "unreserved" characters as safe so they won't be quoted. The
# unreserved characters need to be marked safe explicitly so that the
# quote function behaves the same in py36 as in py37.
#
# ref: https://tools.ietf.org/html/rfc3986#section-2.2
client_path = quote(client_path, safe=":/?#[]@!$&'()*+,;=-._~")
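# For example (illustrative): quote('/some dir/åäö', safe=":/?#[]@!$&'()*+,;=-._~")
# returns '/some%20dir/%C3%A5%C3%A4%C3%B6' -- the space and non-ASCII bytes are
# percent-encoded while '/' and the other reserved characters pass through.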
client_uri = '{protocol}://{host}:{port}{path}'.format(
protocol=protocol,
host=host,
port=port,
path=client_path,
)
if self.request.query:
client_uri += '?' + self.request.query
return client_uri
def _build_proxy_request(self, host, port, proxied_path, body):
headers = self.proxy_request_headers()
client_uri = self.get_client_uri('http', host, port, proxied_path)
# Some applications check X-Forwarded-Context and X-ProxyContextPath
# headers to see if and where they are being proxied from.
if not self.absolute_url:
context_path = self._get_context_path(host, port)
headers['X-Forwarded-Context'] = context_path
headers['X-ProxyContextPath'] = context_path
# to be compatible with flask/werkzeug wsgi applications
headers['X-Forwarded-Prefix'] = context_path
req = httpclient.HTTPRequest(
client_uri, method=self.request.method, body=body,
decompress_response=False,
headers=headers, **self.proxy_request_options())
return req
def _check_host_allowlist(self, host):
if callable(self.host_allowlist):
return self.host_allowlist(self, host)
else:
return host in self.host_allowlist
@web.authenticated
async def proxy(self, host, port, proxied_path):
'''
This serverextension handles:
{base_url}/proxy/{port([0-9]+)}/{proxied_path}
{base_url}/proxy/absolute/{port([0-9]+)}/{proxied_path}
{base_url}/{proxy_base}/{proxied_path}
'''
if not self._check_host_allowlist(host):
self.set_status(403)
self.write("Host '{host}' is not allowed. "
"See https://jupyter-server-proxy.readthedocs.io/en/latest/arbitrary-ports-hosts.html for info.".format(host=host))
return
# Remove hop-by-hop headers that don't necessarily apply to the request we are making
# to the backend. See https://github.com/jupyterhub/jupyter-server-proxy/pull/328
# for more information
hop_by_hop_headers = [
'Proxy-Connection',
'Keep-Alive',
'Transfer-Encoding',
'TE',
'Connection',
'Trailer',
'Upgrade',
'Proxy-Authorization',
'Proxy-Authenticate'
]
for header_to_remove in hop_by_hop_headers:
if header_to_remove in self.request.headers:
del self.request.headers[header_to_remove]
self._record_activity()
if self.request.headers.get("Upgrade", "").lower() == 'websocket':
# We wanna websocket!
# jupyterhub/jupyter-server-proxy@36b3214
self.log.info("we wanna websocket, but we don't define WebSocketProxyHandler")
self.set_status(500)
body = self.request.body
if not body:
if self.request.method in {'POST', 'PUT'}:
body = b''
else:
body = None
client = httpclient.AsyncHTTPClient()
req = self._build_proxy_request(host, port, proxied_path, body)
self.log.debug(f"Proxying request to {req.url}")
try:
# Here, "response" is a tornado.httpclient.HTTPResponse object.
response = await client.fetch(req, raise_error=False)
except httpclient.HTTPError as err:
# We need to capture the timeout error even with raise_error=False,
# because raise_error=False only suppresses the HTTPError raised for
# non-200 response codes; it does not suppress connection/timeout errors.
# Ref: https://www.tornadoweb.org/en/stable/httpclient.html#tornado.httpclient.AsyncHTTPClient.fetch
if err.code == 599:
self._record_activity()
self.set_status(599)
self.write(str(err))
return
else:
raise
# record activity at start and end of requests
self._record_activity()
# For all non http errors...
if response.error and type(response.error) is not httpclient.HTTPError:
self.set_status(500)
self.write(str(response.error))
else:
# Represent the original response as a RewritableResponse object.
original_response = RewritableResponse(orig_response=response)
# The function (or list of functions) which should be applied to modify the
# response.
rewrite_response = self.rewrite_response
# If this is a single function, wrap it in a list.
if isinstance(rewrite_response, (list, tuple)):
rewrite_responses = rewrite_response
else:
rewrite_responses = [rewrite_response]
# To be passed on-demand as args to the rewrite_response functions.
optional_args_to_rewrite_function = {
'request': self.request,
'orig_response': original_response,
'host': host,
'port': port,
'path': proxied_path
}
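# Illustrative only (a hypothetical user-supplied callable, not defined in
# this module): a rewrite_response function may ask for any subset of these
# optional arguments by name, e.g.
#   def add_proxied_path_header(response, path):
#       response.headers['X-Proxied-Path'] = path
# call_with_asked_args inspects its signature and passes only 'response'
# and 'path'.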
# Initial value for rewriting
rewritten_response = original_response
for rewrite in rewrite_responses:
# The rewrite function is a function of the RewritableResponse object
# ``response`` as well as several other optional arguments. We need to
# convert it to a function of only ``response`` by plugging in the
# known values for all the other parameters. (This is called partial
# evaluation.)
def rewrite_pe(rewritable_response: RewritableResponse):
return call_with_asked_args(
rewrite,
{
'response': rewritable_response,
**optional_args_to_rewrite_function
}
)
# Now we can cleanly apply the partially evaluated function to a copy of
# the rewritten response.
rewritten_response = rewritten_response._apply_to_copy(rewrite_pe)
## status
self.set_status(rewritten_response.code, rewritten_response.reason)
# clear tornado default header
self._headers = httputil.HTTPHeaders()
for header, v in rewritten_response.headers.get_all():
if header not in ('Content-Length', 'Transfer-Encoding',
'Connection'):
# some header appear multiple times, eg 'Set-Cookie'
self.add_header(header, v)
if rewritten_response.body:
self.write(rewritten_response.body)
async def proxy_open(self, host, port, proxied_path=''):
"""
Called when a client opens a websocket connection.
We establish a websocket connection to the proxied backend &
set up a callback to relay messages through.
"""
if not self._check_host_allowlist(host):
self.set_status(403)
self.log.info("Host '{host}' is not allowed. "
"See https://jupyter-server-proxy.readthedocs.io/en/latest/arbitrary-ports-hosts.html for info.".format(host=host))
self.close()
return
if not proxied_path.startswith('/'):
proxied_path = '/' + proxied_path
client_uri = self.get_client_uri('ws', host, port, proxied_path)
headers = self.proxy_request_headers()
def message_cb(message):
"""
Callback when the backend sends messages to us
We just pass it back to the frontend
"""
# Websockets support both string (utf-8) and binary data, so let's
# make sure we signal that appropriately when proxying
self._record_activity()
if message is None:
self.close()
else:
self.write_message(message, binary=isinstance(message, bytes))
def ping_cb(data):
"""
Callback when the backend sends pings to us.
We just pass it back to the frontend.
"""
self._record_activity()
self.ping(data)
async def start_websocket_connection():
self.log.info('Trying to establish websocket connection to {}'.format(client_uri))
self._record_activity()
request = httpclient.HTTPRequest(url=client_uri, headers=headers)
self.ws = await pingable_ws_connect(request=request,
on_message_callback=message_cb, on_ping_callback=ping_cb,
subprotocols=self.subprotocols)
self._record_activity()
self.log.info('Websocket connection established to {}'.format(client_uri))
# Wait for the WebSocket to be connected before resolving.
# Otherwise, messages sent by the client before the WebSocket
# connection succeeds would be dropped.
await start_websocket_connection()
def proxy_request_headers(self):
'''A dictionary of headers to be used when constructing
a tornado.httpclient.HTTPRequest instance for the proxy request.'''
headers = self.request.headers.copy()
# Merge any manually configured request headers
headers.update(self.get_request_headers_override())
return headers
def get_request_headers_override(self):
'''Add additional request headers. Typically overridden in subclasses.'''
return {}
def proxy_request_options(self):
'''A dictionary of options to be used when constructing
a tornado.httpclient.HTTPRequest instance for the proxy request.'''
return dict(follow_redirects=False, connect_timeout=250.0, request_timeout=300.0)
def check_xsrf_cookie(self):
'''
http://www.tornadoweb.org/en/stable/guide/security.html
Defer to proxied apps.
'''
pass
def select_subprotocol(self, subprotocols):
'''Select a single Sec-WebSocket-Protocol during handshake.'''
self.subprotocols = subprotocols
if isinstance(subprotocols, list) and subprotocols:
self.log.debug('Client sent subprotocols: {}'.format(subprotocols))
return subprotocols[0]
return super().select_subprotocol(subprotocols)
class LocalProxyHandler(ProxyHandler):
"""
A tornado request handler that proxies HTTP and websockets
from a port on the local system. Same as the above ProxyHandler,
but specific to 'localhost'.
The arguments "port" and "proxied_path" in each method are extracted from
the URL as capture groups in the regex specified in the add_handlers
method.
"""
async def http_get(self, port, proxied_path):
return await self.proxy(port, proxied_path)
async def open(self, port, proxied_path):
return await self.proxy_open('localhost', port, proxied_path)
def post(self, port, proxied_path):
return self.proxy(port, proxied_path)
def put(self, port, proxied_path):
return self.proxy(port, proxied_path)
def delete(self, port, proxied_path):
return self.proxy(port, proxied_path)
def head(self, port, proxied_path):
return self.proxy(port, proxied_path)
def patch(self, port, proxied_path):
return self.proxy(port, proxied_path)
def options(self, port, proxied_path):
return self.proxy(port, proxied_path)
def proxy(self, port, proxied_path):
return super().proxy('localhost', port, proxied_path)
class RemoteProxyHandler(ProxyHandler):
"""
A tornado request handler that proxies HTTP and websockets
from a port on a specified remote system.
The arguments "host", "port" and "proxied_path" in each method are
extracted from the URL as capture groups in the regex specified in the
add_handlers method.
"""
async def http_get(self, host, port, proxied_path):
return await self.proxy(host, port, proxied_path)
def post(self, host, port, proxied_path):
return self.proxy(host, port, proxied_path)
def put(self, host, port, proxied_path):
return self.proxy(host, port, proxied_path)
def delete(self, host, port, proxied_path):
return self.proxy(host, port, proxied_path)
def head(self, host, port, proxied_path):
return self.proxy(host, port, proxied_path)
def patch(self, host, port, proxied_path):
return self.proxy(host, port, proxied_path)
def options(self, host, port, proxied_path):
return self.proxy(host, port, proxied_path)
async def open(self, host, port, proxied_path):
return await self.proxy_open(host, port, proxied_path)
def proxy(self, host, port, proxied_path):
return super().proxy(host, port, proxied_path)
# FIXME: Move this to its own file. Too many packages now import this from nbserverproxy.handlers
class SuperviseAndProxyHandler(LocalProxyHandler):
'''Manage a given process and requests to it '''
def __init__(self, *args, **kwargs):
self.requested_port = 0
self.mappath = {}
super().__init__(*args, **kwargs)
def initialize(self, state):
self.state = state
if 'proc_lock' not in state:
state['proc_lock'] = Lock()
name = 'process'
@property
def port(self):
"""
Allocate either the requested port or a random empty port for use by
application
"""
if 'port' not in self.state:
sock = socket.socket()
sock.bind(('', self.requested_port))
self.state['port'] = sock.getsockname()[1]
sock.close()
return self.state['port']
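# Note (illustrative): when self.requested_port is 0 (the default), binding
# to port 0 asks the OS for a free ephemeral port; getsockname() reports the
# port that was assigned, and the socket is closed again so the supervised
# process can bind it shortly afterwards. The chosen port is cached in
# self.state for subsequent requests.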
def get_cwd(self):
"""Get the current working directory for our process
Override in subclass to launch the process in a directory
other than the current.
"""
return os.getcwd()
def get_env(self):
'''Set up extra environment variables for process. Typically
overridden in subclasses.'''
return {}
def get_timeout(self):
"""
Return timeout (in s) to wait before giving up on process readiness
"""
return 5
async def _http_ready_func(self, p):
url = 'http://localhost:{}'.format(self.port)
async with aiohttp.ClientSession() as session:
try:
async with session.get(url, allow_redirects=False) as resp:
# We only care if we get back *any* response, not just 200
# If there's an error response, that can be shown directly to the user
self.log.debug('Got code {} back from {}'.format(resp.status, url))
return True
except aiohttp.ClientConnectionError:
self.log.debug('Connection to {} refused'.format(url))
return False
async def ensure_process(self):
"""
Start the process
"""
# We don't want multiple requests trying to start the process at the same time
# FIXME: Make sure this times out properly?
# Invariant: when the lock isn't held, 'proc' is either present in self.state
# and running, or absent.
async with self.state['proc_lock']:
if 'proc' not in self.state:
# FIXME: Prevent races here
# FIXME: Handle graceful exits of spawned processes here
cmd = self.get_cmd()
# Set up extra environment variables for process
server_env = os.environ.copy()
server_env.update(self.get_env())
timeout = self.get_timeout()
proc = SupervisedProcess(self.name, *cmd, env=server_env, ready_func=self._http_ready_func, ready_timeout=timeout, log=self.log)
self.state['proc'] = proc
try:
await proc.start()
is_ready = await proc.ready()
if not is_ready:
await proc.kill()
raise web.HTTPError(500, 'could not start {} in time'.format(self.name))
except:
# Make sure we remove proc from state in any error condition
del self.state['proc']
raise
@web.authenticated
async def proxy(self, port, path):
if not path.startswith('/'):
path = '/' + path
if self.mappath:
if callable(self.mappath):
path = call_with_asked_args(self.mappath, {'path': path})
else:
path = self.mappath.get(path, path)
await self.ensure_process()
return await ensure_async(super().proxy(self.port, path))
async def http_get(self, path):
return await ensure_async(self.proxy(self.port, path))
async def open(self, path):
await self.ensure_process()
return await super().open(self.port, path)
def post(self, path):
return self.proxy(self.port, path)
def put(self, path):
return self.proxy(self.port, path)
def delete(self, path):
return self.proxy(self.port, path)
def head(self, path):
return self.proxy(self.port, path)
def patch(self, path):
return self.proxy(self.port, path)
def options(self, path):
return self.proxy(self.port, path)
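# A minimal subclass sketch showing what ensure_process() expects a concrete
# handler to supply; the class name and command below are hypothetical, not
# part of this module:
#
#   class ExampleProxyHandler(SuperviseAndProxyHandler):
#       name = 'example-server'
#
#       def get_cmd(self):
#           # command is launched once and proxied on the allocated self.port
#           return ['example-server', '--port', str(self.port)]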
def setup_handlers(web_app, serverproxy_config):
host_allowlist = serverproxy_config.host_allowlist
rewrite_response = serverproxy_config.non_service_rewrite_response
web_app.add_handlers(
".*",
[
(
url_path_join(
web_app.settings["base_url"],
r"/proxy/([^/:@]+):(\d+)(/.*|)",
),
RemoteProxyHandler,
{
"absolute_url": False,
"host_allowlist": host_allowlist,
"rewrite_response": rewrite_response,
},
),
(
url_path_join(
web_app.settings["base_url"],
r"/proxy/absolute/([^/:@]+):(\d+)(/.*|)",
),
RemoteProxyHandler,
{
"absolute_url": True,
"host_allowlist": host_allowlist,
"rewrite_response": rewrite_response,
},
),
(
url_path_join(
web_app.settings["base_url"],
r"/proxy/(\d+)(/.*|)",
),
LocalProxyHandler,
{
"absolute_url": False,
"rewrite_response": rewrite_response,
},
),
(
url_path_join(
web_app.settings["base_url"],
r"/proxy/absolute/(\d+)(/.*|)",
),
LocalProxyHandler,
{
"absolute_url": True,
"rewrite_response": rewrite_response,
},
),
],
)
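# Examples of request paths matched by the handlers registered above
# (illustrative only; base_url assumed to be "/"):
#   /proxy/54321/index.html          -> LocalProxyHandler  (port 54321)
#   /proxy/absolute/54321/index.html -> LocalProxyHandler  (absolute_url=True)
#   /proxy/myhost:8080/index.html    -> RemoteProxyHandler (host "myhost", port 8080)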
# vim: set et ts=4 sw=4:
|
GHSA-w3vc-fx9p-wp4v
|
setup.py
|
@@ -93,6 +93,7 @@
# acceptance tests additionally require firefox and geckodriver
"test": [
"pytest",
+ "pytest-asyncio",
"pytest-cov",
"pytest-html"
],
|
"""
jupyter-server-proxy setup
"""
import json
from glob import glob
from pathlib import Path
import setuptools
from jupyter_packaging import (
combine_commands,
create_cmdclass,
ensure_targets,
install_npm,
skip_if_exists,
)
HERE = Path(__file__).parent.resolve()
# The name of the project
name = "jupyter_server_proxy"
lab_path = HERE / name / "labextension"
# Representative files that should exist after a successful build
jstargets = [
str(lab_path / "package.json"),
]
package_data_spec = {
name: ["*"],
}
labext_name = "@jupyterlab/server-proxy"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, str(lab_path), "**"),
("share/jupyter/labextensions/%s" % labext_name, str(HERE), "install.json"),
(
"etc/jupyter/jupyter_server_config.d",
"jupyter_server_proxy/etc",
"jupyter-server-proxy-jupyterserverextension.json",
),
(
"etc/jupyter/jupyter_notebook_config.d",
"jupyter_server_proxy/etc",
"jupyter-server-proxy-notebookserverextension.json",
),
(
"etc/jupyter/nbconfig/tree.d",
"jupyter_server_proxy/etc",
"jupyter-server-proxy-nbextension.json",
),
]
cmdclass = create_cmdclass(
"jsdeps", package_data_spec=package_data_spec, data_files_spec=data_files_spec
)
js_command = combine_commands(
install_npm(HERE / "jupyterlab-server-proxy", build_cmd="build:prod", npm=["jlpm"]),
ensure_targets(jstargets),
)
is_repo = (HERE / ".git").exists()
if is_repo:
cmdclass["jsdeps"] = js_command
else:
cmdclass["jsdeps"] = skip_if_exists(jstargets, js_command)
long_description = (HERE / "README.md").read_text()
# Get the package info from package.json
pkg_json = json.loads((HERE / "jupyterlab-server-proxy" / "package.json").read_bytes())
setup_args = dict(
name=name.replace("_", "-"),
version=pkg_json["version"],
url=pkg_json["homepage"],
author=pkg_json["author"]["name"],
author_email=pkg_json["author"]["email"],
description=pkg_json["description"],
license=pkg_json["license"],
long_description=long_description,
long_description_content_type="text/markdown",
cmdclass=cmdclass,
packages=setuptools.find_packages(),
install_requires=[
"aiohttp",
"jupyter-server>=1.0",
"simpervisor>=0.4",
],
extras_require={
# acceptance tests additionally require firefox and geckodriver
"test": [
"pytest",
"pytest-cov",
"pytest-html"
],
"acceptance": [
"robotframework-jupyterlibrary"
]
},
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"Framework :: Jupyter",
"Framework :: Jupyter :: JupyterLab :: 2",
"Framework :: Jupyter :: JupyterLab :: 3",
"Framework :: Jupyter :: JupyterLab :: Extensions",
"Framework :: Jupyter :: JupyterLab :: Extensions :: Prebuilt",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Framework :: Jupyter",
],
data_files=[
("share/jupyter/nbextensions/jupyter_server_proxy", glob("jupyter_server_proxy/static/*")),
(
"etc/jupyter/jupyter_notebook_config.d",
["jupyter_server_proxy/etc/jupyter-server-proxy-notebookserverextension.json"],
),
(
"etc/jupyter/jupyter_server_config.d",
["jupyter_server_proxy/etc/jupyter-server-proxy-jupyterserverextension.json"],
),
(
"etc/jupyter/nbconfig/tree.d",
["jupyter_server_proxy/etc/jupyter-server-proxy-nbextension.json"],
),
],
)
if __name__ == "__main__":
setuptools.setup(**setup_args)
|
GHSA-w3vc-fx9p-wp4v
|
tests/test_proxies.py
|
@@ -6,6 +6,7 @@
from http.client import HTTPConnection
from urllib.parse import quote
import pytest
+from tornado.httpclient import HTTPClientError
from tornado.websocket import websocket_connect
PORT = os.getenv('TEST_PORT', 8888)
@@ -246,28 +247,19 @@ def test_server_content_encoding_header():
assert f.read() == b'this is a test'
[email protected](scope="module")
-def event_loop():
- loop = asyncio.get_event_loop()
- yield loop
- loop.close()
-
-
-async def _websocket_echo():
- url = "ws://localhost:{}/python-websocket/echosocket".format(PORT)
[email protected]
+async def test_server_proxy_websocket_messages():
+ url = "ws://localhost:{}/python-websocket/echosocket?token={}".format(PORT, TOKEN)
conn = await websocket_connect(url)
expected_msg = "Hello, world!"
await conn.write_message(expected_msg)
msg = await conn.read_message()
assert msg == expected_msg
-def test_server_proxy_websocket(event_loop):
- event_loop.run_until_complete(_websocket_echo())
-
-
-async def _websocket_headers():
- url = "ws://localhost:{}/python-websocket/headerssocket".format(PORT)
[email protected]
+async def test_server_proxy_websocket_headers():
+ url = "ws://localhost:{}/python-websocket/headerssocket?token={}".format(PORT, TOKEN)
conn = await websocket_connect(url)
await conn.write_message("Hello")
msg = await conn.read_message()
@@ -276,20 +268,23 @@ async def _websocket_headers():
assert headers['X-Custom-Header'] == 'pytest-23456'
-def test_server_proxy_websocket_headers(event_loop):
- event_loop.run_until_complete(_websocket_headers())
-
-
-async def _websocket_subprotocols():
- url = "ws://localhost:{}/python-websocket/subprotocolsocket".format(PORT)
[email protected]
+async def test_server_proxy_websocket_subprotocols():
+ url = "ws://localhost:{}/python-websocket/subprotocolsocket?token={}".format(PORT, TOKEN)
conn = await websocket_connect(url, subprotocols=["protocol_1", "protocol_2"])
await conn.write_message("Hello, world!")
msg = await conn.read_message()
assert json.loads(msg) == ["protocol_1", "protocol_2"]
-def test_server_proxy_websocket_subprotocols(event_loop):
- event_loop.run_until_complete(_websocket_subprotocols())
[email protected]
+async def test_websocket_no_auth_failure():
+ # Intentionally do not pass an appropriate token, which should cause a 403
+ url = "ws://localhost:{}/python-websocket/headerssocket".format(PORT)
+
+ with pytest.raises(HTTPClientError, match=r".*HTTP 403: Forbidden.*"):
+ await websocket_connect(url)
+
@pytest.mark.parametrize(
"proxy_path, status",
|
import asyncio
import gzip
from io import BytesIO
import json
import os
from http.client import HTTPConnection
from urllib.parse import quote
import pytest
from tornado.websocket import websocket_connect
PORT = os.getenv('TEST_PORT', 8888)
TOKEN = os.getenv('JUPYTER_TOKEN', 'secret')
def request_get(port, path, token, host='localhost'):
h = HTTPConnection(host, port, 10)
if '?' in path:
url = '{}&token={}'.format(path, token)
else:
url = '{}?token={}'.format(path, token)
h.request('GET', url)
return h.getresponse()
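# Usage sketch (hypothetical values): request_get(8888, '/python-http/abc', 'secret')
# issues GET /python-http/abc?token=secret against localhost:8888 and returns the
# http.client response object.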
def test_server_proxy_minimal_proxy_path_encoding():
"""Test that we don't encode anything more than we must to have a valid web
request."""
special_path = quote("Hello world 123 åäö 🎉你好世界±¥ :/[]@!$&'()*+,;=-._~?key1=value1", safe=":/?#[]@!$&'()*+,;=-._~")
test_url = '/python-http/' + special_path
r = request_get(PORT, test_url, TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert 'GET /{}&token='.format(special_path) in s
def test_server_proxy_hash_sign_encoding():
"""
FIXME: This is a test to establish the current behavior, but if it should be
like this is a separate question not yet addressed.
Related: https://github.com/jupyterhub/jupyter-server-proxy/issues/109
"""
h = HTTPConnection("localhost", PORT, 10)
# Case 0: a reference case
path = "?token={}".format(TOKEN)
h.request('GET', '/python-http/' + path)
r = h.getresponse()
assert r.code == 200
s = r.read().decode('ascii')
assert 'GET /{} '.format(path) in s
# Case 1: #bla?token=secret -> everything following # ignored -> redirect because no token
path = "#bla?token={}".format(TOKEN)
h.request('GET', '/python-http/' + path)
r = h.getresponse()
assert r.code == 200
s = r.read().decode('ascii')
assert 'GET / ' in s
# Case 2: %23bla?token=secret -> %23 is # -> everything following # ignored -> redirect because no token
path = "%23?token={}".format(TOKEN)
h.request('GET', '/python-http/' + path)
r = h.getresponse()
assert r.code == 200
s = r.read().decode('ascii')
assert 'GET / ' in s
# Case 3: ?token=secret#test -> invalid token -> jupyter notebook server errors: NoneType can't be used in 'await' expression
#
# [E 11:37:49.991 NotebookApp] Uncaught exception GET /python-http/?token=secrettest (127.0.0.1)
# HTTPServerRequest(protocol='http', host='localhost:8888', method='GET', uri='/python-http/?token=secrettest', version='HTTP/1.1', remote_ip='127.0.0.1')
# Traceback (most recent call last):
# File "/home/erik/py/lib/python3.7/site-packages/tornado/web.py", line 1704, in _execute
# result = await result
# File "/home/erik/py/lib/python3.7/site-packages/jupyter_server_proxy/websocket.py", line 97, in get
# return await self.http_get(*args, **kwargs)
# File "/home/erik/py/lib/python3.7/site-packages/jupyter_server_proxy/handlers.py", line 539, in http_get
# return await self.proxy(self.port, path)
# TypeError: object NoneType can't be used in 'await' expression
path = "?token={}#test".format(TOKEN)
h.request('GET', '/python-http/' + path)
r = h.getresponse()
assert r.code == 302
s = r.read().decode('ascii')
assert s == ''
def test_server_rewrite_response():
r = request_get(PORT, '/python-http-rewrite-response/ciao-a-tutti', TOKEN)
assert r.code == 418
assert r.reason == "I'm a teapot"
assert ("I-Like", "tacos") in r.headers.items()
assert ("Proxied-Host-Port", "localhost:54323") in r.headers.items()
assert ("Proxied-Path", "/ciao-a-tutti") in r.headers.items()
s = r.read().decode('ascii')
assert s.startswith('GET /hello-a-tutti?token=')
def test_chained_rewrite_response():
r = request_get(PORT, '/python-chained-rewrite-response/ciao-a-tutti', TOKEN)
assert r.code == 418
assert r.reason == "I'm a teapot"
s = r.read().decode('ascii')
assert s.startswith('GET /foo-a-tutti?token=')
def test_cats_and_dogs_rewrite_response():
r = request_get(PORT, '/python-cats-only-rewrite-response/goats', TOKEN)
assert r.code == 200
r = request_get(PORT, '/python-cats-only-rewrite-response/cat-club', TOKEN)
s = r.read().decode('ascii')
assert r.code == 403
assert r.reason == "Forbidden"
assert s == "dogs not allowed"
r = request_get(PORT, '/python-dogs-only-rewrite-response/cat-club', TOKEN)
s = r.read().decode('ascii')
assert r.code == 403
assert r.reason == "Forbidden"
assert s == "cats not allowed"
def test_server_proxy_non_absolute():
r = request_get(PORT, '/python-http/abc', TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET /abc?token=')
assert 'X-Forwarded-Context: /python-http\n' in s
assert 'X-Proxycontextpath: /python-http\n' in s
def test_server_proxy_absolute():
r = request_get(PORT, '/python-http-abs/def', TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET /python-http-abs/def?token=')
assert 'X-Forwarded-Context' not in s
assert 'X-Proxycontextpath' not in s
def test_server_proxy_requested_port():
r = request_get(PORT, '/python-http-port54321/ghi', TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET /ghi?token=')
assert 'X-Forwarded-Context: /python-http-port54321\n' in s
assert 'X-Proxycontextpath: /python-http-port54321\n' in s
direct = request_get(54321, '/ghi', TOKEN)
assert direct.code == 200
def test_server_proxy_port_non_absolute():
r = request_get(PORT, '/proxy/54321/jkl', TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET /jkl?token=')
assert 'X-Forwarded-Context: /proxy/54321\n' in s
assert 'X-Proxycontextpath: /proxy/54321\n' in s
def test_server_proxy_port_absolute():
r = request_get(PORT, '/proxy/absolute/54321/nmo', TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET /proxy/absolute/54321/nmo?token=')
assert 'X-Forwarded-Context' not in s
assert 'X-Proxycontextpath' not in s
def test_server_proxy_host_non_absolute():
# note: localhost: is stripped but 127.0.0.1: is not
r = request_get(PORT, '/proxy/127.0.0.1:54321/jkl', TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET /jkl?token=')
assert 'X-Forwarded-Context: /proxy/127.0.0.1:54321\n' in s
assert 'X-Proxycontextpath: /proxy/127.0.0.1:54321\n' in s
def test_server_proxy_host_absolute():
r = request_get(PORT, '/proxy/absolute/127.0.0.1:54321/nmo', TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET /proxy/absolute/127.0.0.1:54321/nmo?token=')
assert 'X-Forwarded-Context' not in s
assert 'X-Proxycontextpath' not in s
def test_server_proxy_port_non_service_rewrite_response():
"""Test that 'hello' is replaced by 'foo'."""
r = request_get(PORT, '/proxy/54321/hello', TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET /foo?token=')
@pytest.mark.parametrize(
"requestpath,expected", [
('/', '/index.html?token='),
('/?q=1', '/index.html?q=1&token='),
('/pqr?q=2', '/pqr?q=2&token='),
]
)
def test_server_proxy_mappath_dict(requestpath, expected):
r = request_get(PORT, '/python-http-mappath' + requestpath, TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET ' + expected)
assert 'X-Forwarded-Context: /python-http-mappath\n' in s
assert 'X-Proxycontextpath: /python-http-mappath\n' in s
@pytest.mark.parametrize(
"requestpath,expected", [
('/', '/mapped?token='),
('/?q=1', '/mapped?q=1&token='),
('/stu?q=2', '/stumapped?q=2&token='),
]
)
def test_server_proxy_mappath_callable(requestpath, expected):
r = request_get(PORT, '/python-http-mappathf' + requestpath, TOKEN)
assert r.code == 200
s = r.read().decode('ascii')
assert s.startswith('GET ' + expected)
assert 'X-Forwarded-Context: /python-http-mappathf\n' in s
assert 'X-Proxycontextpath: /python-http-mappathf\n' in s
def test_server_proxy_remote():
r = request_get(PORT, '/newproxy', TOKEN, host='127.0.0.1')
assert r.code == 200
def test_server_request_headers():
r = request_get(PORT, '/python-request-headers/', TOKEN, host='127.0.0.1')
assert r.code == 200
s = r.read().decode('ascii')
assert 'X-Custom-Header: pytest-23456\n' in s
def test_server_content_encoding_header():
r = request_get(PORT, '/python-gzipserver/', TOKEN, host='127.0.0.1')
assert r.code == 200
assert r.headers['Content-Encoding'] == 'gzip'
with gzip.GzipFile(fileobj=BytesIO(r.read()), mode='r') as f:
assert f.read() == b'this is a test'
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
async def _websocket_echo():
url = "ws://localhost:{}/python-websocket/echosocket".format(PORT)
conn = await websocket_connect(url)
expected_msg = "Hello, world!"
await conn.write_message(expected_msg)
msg = await conn.read_message()
assert msg == expected_msg
def test_server_proxy_websocket(event_loop):
event_loop.run_until_complete(_websocket_echo())
async def _websocket_headers():
url = "ws://localhost:{}/python-websocket/headerssocket".format(PORT)
conn = await websocket_connect(url)
await conn.write_message("Hello")
msg = await conn.read_message()
headers = json.loads(msg)
assert 'X-Custom-Header' in headers
assert headers['X-Custom-Header'] == 'pytest-23456'
def test_server_proxy_websocket_headers(event_loop):
event_loop.run_until_complete(_websocket_headers())
async def _websocket_subprotocols():
url = "ws://localhost:{}/python-websocket/subprotocolsocket".format(PORT)
conn = await websocket_connect(url, subprotocols=["protocol_1", "protocol_2"])
await conn.write_message("Hello, world!")
msg = await conn.read_message()
assert json.loads(msg) == ["protocol_1", "protocol_2"]
def test_server_proxy_websocket_subprotocols(event_loop):
event_loop.run_until_complete(_websocket_subprotocols())
@pytest.mark.parametrize(
"proxy_path, status",
[
("127.0.0.1", 404),
("127.0.0.1/path", 404),
("[email protected]", 404),
("[email protected]/path", 404),
("user:pass@host:123/foo", 404),
("user:pass@host/foo", 404),
("absolute/127.0.0.1:[email protected]/path", 404),
]
)
def test_bad_server_proxy_url(proxy_path, status):
r = request_get(PORT, f"/proxy/{proxy_path}", TOKEN)
assert r.code == status
if status >= 400:
# request should not have been proxied
assert 'X-ProxyContextPath' not in r.headers
|
GHSA-w3vc-fx9p-wp4v
|
modoboa/admin/views/domain.py
|
@@ -16,6 +16,7 @@
from django.utils.translation import ugettext as _, ungettext
from django.views import generic
from django.views.decorators.csrf import ensure_csrf_cookie
+from django.views.decorators.http import require_http_methods
from modoboa.core import signals as core_signals
from modoboa.lib.exceptions import PermDeniedException
@@ -230,6 +231,7 @@ def editdomain(request, dom_id):
@login_required
@permission_required("admin.delete_domain")
+@require_http_methods(["POST"])
def deldomain(request, dom_id):
keepdir = request.POST.get("keepdir", "false") == "true"
try:
|
"""Domain related views."""
from functools import reduce
from reversion import revisions as reversion
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import (
login_required, permission_required, user_passes_test
)
from django.db.models import Q, Sum
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import ugettext as _, ungettext
from django.views import generic
from django.views.decorators.csrf import ensure_csrf_cookie
from modoboa.core import signals as core_signals
from modoboa.lib.exceptions import PermDeniedException
from modoboa.lib.listing import get_listing_page, get_sort_order
from modoboa.lib.web_utils import render_to_json_response
from modoboa.maillog import models as ml_models
from .. import signals
from ..forms import DomainForm, DomainWizard
from ..lib import get_domains
from ..models import Domain, Mailbox
@login_required
def index(request):
return HttpResponseRedirect(reverse("admin:domain_list"))
@login_required
@user_passes_test(
lambda u: u.has_perm("admin.view_domain") or
u.has_perm("admin.view_mailbox")
)
def _domains(request):
sort_order, sort_dir = get_sort_order(request.GET, "name")
extra_filters = signals.extra_domain_filters.send(sender="_domains")
if extra_filters:
extra_filters = reduce(
lambda a, b: a + b, [result[1] for result in extra_filters])
filters = {
flt: request.GET.get(flt, None)
for flt in ["domfilter", "searchquery"] + extra_filters
}
request.session["domains_filters"] = filters
domainlist = get_domains(request.user, **filters)
if sort_order == "name":
domainlist = sorted(
domainlist,
key=lambda d: getattr(d, sort_order), reverse=sort_dir == "-"
)
else:
domainlist = sorted(domainlist, key=lambda d: d.tags[0]["name"],
reverse=sort_dir == "-")
context = {
"handle_mailboxes": request.localconfig.parameters.get_value(
"handle_mailboxes", raise_exception=False),
"auto_account_removal": request.localconfig.parameters.get_value(
"auto_account_removal"),
}
page = get_listing_page(domainlist, request.GET.get("page", 1))
parameters = request.localconfig.parameters
dns_checks = {
"enable_mx_checks": parameters.get_value("enable_mx_checks"),
"enable_spf_checks": parameters.get_value("enable_spf_checks"),
"enable_dkim_checks": parameters.get_value("enable_dkim_checks"),
"enable_dmarc_checks": parameters.get_value("enable_dmarc_checks"),
"enable_autoconfig_checks": (
parameters.get_value("enable_autoconfig_checks")),
"enable_dnsbl_checks": parameters.get_value("enable_dnsbl_checks")
}
context["headers"] = render_to_string(
"admin/domain_headers.html", dns_checks, request
)
if page is None:
context["length"] = 0
else:
tpl_context = {"domains": page.object_list}
tpl_context.update(dns_checks)
context["rows"] = render_to_string(
"admin/domains_table.html", tpl_context, request
)
context["pages"] = [page.number]
return render_to_json_response(context)
@login_required
@ensure_csrf_cookie
def domains(request, tplname="admin/domains.html"):
if not request.user.has_perm("admin.view_domain"):
if request.user.has_perm("admin.view_mailbox"):
return HttpResponseRedirect(
reverse("admin:identity_list")
)
return HttpResponseRedirect(reverse("core:user_index"))
parameters = request.localconfig.parameters
return render(request, tplname, {
"selection": "domains",
"enable_mx_checks": parameters.get_value("enable_mx_checks"),
"enable_spf_checks": parameters.get_value("enable_spf_checks"),
"enable_dkim_checks": parameters.get_value("enable_dkim_checks"),
"enable_dmarc_checks": parameters.get_value("enable_dmarc_checks"),
"enable_autoconfig_checks": (
parameters.get_value("enable_autoconfig_checks")),
"enable_dnsbl_checks": parameters.get_value("enable_dnsbl_checks")
})
@login_required
@user_passes_test(
lambda u: u.has_perm("admin.view_domain") or
u.has_perm("admin.view_mailbox") or
u.has_perm("admin.add_domain")
)
def get_next_page(request):
"""Return the next page of the domain or quota list."""
objtype = request.GET.get("objtype", "domain")
if objtype == "domain":
return _domains(request)
if objtype == "quota":
return list_quotas(request)
return list_logs(request)
@login_required
@permission_required("core.add_user")
def domains_list(request):
doms = [dom.name for dom in Domain.objects.get_for_admin(request.user)]
return render_to_json_response(doms)
@login_required
@permission_required("admin.view_domain")
def list_quotas(request):
sort_order, sort_dir = get_sort_order(request.GET, "name")
domains = Domain.objects.get_for_admin(request.user)
domains = domains.exclude(quota=0)
if sort_order in ["name", "quota"]:
domains = domains.order_by("{}{}".format(sort_dir, sort_order))
elif sort_order == "allocated_quota":
domains = (
domains.annotate(allocated_quota=Sum("mailbox__quota"))
.order_by("{}{}".format(sort_dir, sort_order))
)
page = get_listing_page(domains, request.GET.get("page", 1))
context = {
"headers": render_to_string(
"admin/domains_quota_headers.html", {}, request
)
}
if page is None:
context["length"] = 0
else:
context["rows"] = render_to_string(
"admin/domains_quotas.html", {"domains": page}, request
)
context["pages"] = [page.number]
return render_to_json_response(context)
@login_required
@permission_required("admin.view_domain")
def list_logs(request):
"""List all Maillog entries."""
sort_order, sort_dir = get_sort_order(request.GET, "date")
search = request.GET.get("searchquery")
if not request.user.is_superuser:
domains = Domain.objects.get_for_admin(request.user)
logs = ml_models.Maillog.objects.filter(
Q(from_domain__in=domains) | Q(to_domain__in=domains)
)
else:
logs = ml_models.Maillog.objects.all()
logs = logs.order_by("{}{}".format(sort_dir, sort_order))
if search:
logs = logs.filter(
Q(sender__icontains=search) |
Q(rcpt__icontains=search) |
Q(queue_id__icontains=search) |
Q(status__icontains=search)
)
page = get_listing_page(logs, request.GET.get("page", 1))
context = {
"headers": render_to_string(
"admin/domains_log_headers.html", {}, request
)
}
if page is None:
context["length"] = 0
else:
context["rows"] = render_to_string(
"admin/domains_logs.html", {"logs": page}, request
)
context["pages"] = [page.number]
return render_to_json_response(context)
@login_required
@permission_required("admin.add_domain")
@reversion.create_revision()
def newdomain(request):
core_signals.can_create_object.send(
"newdomain", context=request.user, klass=Domain)
return DomainWizard(request).process()
@login_required
@permission_required("admin.view_domain")
@reversion.create_revision()
def editdomain(request, dom_id):
"""Edit domain view."""
domain = Domain.objects.get(pk=dom_id)
if not request.user.can_access(domain):
raise PermDeniedException
instances = {"general": domain}
results = signals.get_domain_form_instances.send(
sender="editdomain", user=request.user, domain=domain)
for result in results:
instances.update(result[1])
return DomainForm(request, instances=instances).process()
@login_required
@permission_required("admin.delete_domain")
def deldomain(request, dom_id):
keepdir = request.POST.get("keepdir", "false") == "true"
try:
mb = Mailbox.objects.get(user__id=request.user.id)
except Mailbox.DoesNotExist:
mb = None
dom = Domain.objects.get(pk=dom_id)
if not request.user.can_access(dom):
raise PermDeniedException
if mb and mb.domain == dom:
raise PermDeniedException(_("You can't delete your own domain"))
dom.delete(request.user, keepdir)
msg = ungettext("Domain deleted", "Domains deleted", 1)
return render_to_json_response(msg)
class DomainDetailView(
auth_mixins.PermissionRequiredMixin, generic.DetailView):
"""DetailView for Domain."""
model = Domain
permission_required = "admin.view_domain"
def get_queryset(self):
"""Add some prefetching."""
return (
Domain.objects.get_for_admin(self.request.user)
.prefetch_related("domainalias_set", "mailbox_set", "alias_set")
)
def get_context_data(self, **kwargs):
"""Include extra widgets."""
context = super(DomainDetailView, self).get_context_data(**kwargs)
result = signals.extra_domain_dashboard_widgets.send(
self.__class__, user=self.request.user, domain=self.object)
parameters = self.request.localconfig.parameters
context.update({
"templates": {"left": [], "right": []},
"enable_mx_checks": parameters.get_value("enable_mx_checks"),
"enable_spf_checks": parameters.get_value("enable_spf_checks"),
"enable_dkim_checks": parameters.get_value("enable_dkim_checks"),
"enable_dmarc_checks": parameters.get_value("enable_dmarc_checks"),
"enable_autoconfig_checks": (
parameters.get_value("enable_autoconfig_checks")),
"enable_dnsbl_checks": parameters.get_value("enable_dnsbl_checks"),
})
for _receiver, widgets in result:
for widget in widgets:
context["templates"][widget["column"]].append(
widget["template"])
# FIXME: can raise conflicts...
context.update(widget["context"])
return context
class DomainAlarmsView(
auth_mixins.PermissionRequiredMixin, generic.DetailView):
"""A view to list domain alarms."""
model = Domain
permission_required = "admin.view_domain"
template_name = "admin/domain_alarms.html"
def get_queryset(self):
"""Add some prefetching."""
return (
Domain.objects.get_for_admin(self.request.user)
.prefetch_related("alarms")
)
|
PYSEC-2023-282
|
src/lxml/tests/test_etree.py
|
@@ -1460,6 +1460,26 @@ def test_iterwalk_getiterator(self):
[1,2,1,4],
counts)
+ def test_walk_after_parse_failure(self):
+ # This used to be an issue because libxml2 can leak empty namespaces
+ # between failed parser runs. iterwalk() failed to handle such a tree.
+ try:
+ etree.XML('''<anot xmlns="1">''')
+ except etree.XMLSyntaxError:
+ pass
+ else:
+ assert False, "invalid input did not fail to parse"
+
+ et = etree.XML('''<root> </root>''')
+ try:
+ ns = next(etree.iterwalk(et, events=('start-ns',)))
+ except StopIteration:
+ # This would be the expected result, because there was no namespace
+ pass
+ else:
+ # This is a bug in libxml2
+ assert not ns, repr(ns)
+
def test_itertext_comment_pi(self):
# https://bugs.launchpad.net/lxml/+bug/1844674
XML = self.etree.XML
|
# -*- coding: utf-8 -*-
"""
Tests specific to the extended etree API
Tests that apply to the general ElementTree API should go into
test_elementtree
"""
from __future__ import absolute_import
from collections import OrderedDict
import os.path
import unittest
import copy
import sys
import re
import gc
import operator
import textwrap
import zlib
import gzip
from .common_imports import etree, StringIO, BytesIO, HelperTestCase
from .common_imports import fileInTestDir, fileUrlInTestDir, read_file, path2url, tmpfile
from .common_imports import SillyFileLike, LargeFileLikeUnicode, doctest, make_doctest
from .common_imports import canonicalize, _str, _bytes
from .common_imports import SimpleFSPath
print("""
TESTED VERSION: %s""" % etree.__version__ + """
Python: %r""" % (sys.version_info,) + """
lxml.etree: %r""" % (etree.LXML_VERSION,) + """
libxml used: %r""" % (etree.LIBXML_VERSION,) + """
libxml compiled: %r""" % (etree.LIBXML_COMPILED_VERSION,) + """
libxslt used: %r""" % (etree.LIBXSLT_VERSION,) + """
libxslt compiled: %r""" % (etree.LIBXSLT_COMPILED_VERSION,) + """
FS encoding: %s""" % (sys.getfilesystemencoding(),) + """
Default encoding: %s""" % (sys.getdefaultencoding(),) + """
Max Unicode: %s""" % (sys.maxunicode,) + """
""")
try:
_unicode = unicode
except NameError:
# Python 3
_unicode = str
class ETreeOnlyTestCase(HelperTestCase):
"""Tests only for etree, not ElementTree"""
etree = etree
def test_version(self):
self.assertTrue(isinstance(etree.__version__, _unicode))
self.assertTrue(isinstance(etree.LXML_VERSION, tuple))
self.assertEqual(len(etree.LXML_VERSION), 4)
self.assertTrue(isinstance(etree.LXML_VERSION[0], int))
self.assertTrue(isinstance(etree.LXML_VERSION[1], int))
self.assertTrue(isinstance(etree.LXML_VERSION[2], int))
self.assertTrue(isinstance(etree.LXML_VERSION[3], int))
self.assertTrue(etree.__version__.startswith(
str(etree.LXML_VERSION[0])))
def test_c_api(self):
if hasattr(self.etree, '__pyx_capi__'):
# newer Pyrex compatible C-API
self.assertTrue(isinstance(self.etree.__pyx_capi__, dict))
self.assertTrue(len(self.etree.__pyx_capi__) > 0)
else:
# older C-API mechanism
self.assertTrue(hasattr(self.etree, '_import_c_api'))
def test_include_paths(self):
import lxml
includes = lxml.get_include()
self.assertTrue(includes)
self.assertTrue(len(includes) >= 2)
self.assertTrue(os.path.join(os.path.dirname(lxml.__file__), 'includes') in includes,
includes)
def test_element_names(self):
Element = self.etree.Element
el = Element('name')
self.assertEqual(el.tag, 'name')
el = Element('{}name')
self.assertEqual(el.tag, 'name')
def test_element_name_empty(self):
Element = self.etree.Element
el = Element('name')
self.assertRaises(ValueError, Element, '{}')
self.assertRaises(ValueError, setattr, el, 'tag', '{}')
self.assertRaises(ValueError, Element, '{test}')
self.assertRaises(ValueError, setattr, el, 'tag', '{test}')
def test_element_name_colon(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, 'p:name')
self.assertRaises(ValueError, Element, '{test}p:name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', 'p:name')
def test_element_name_quote(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, "p'name")
self.assertRaises(ValueError, Element, 'p"name')
self.assertRaises(ValueError, Element, "{test}p'name")
self.assertRaises(ValueError, Element, '{test}p"name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', "p'name")
self.assertRaises(ValueError, setattr, el, 'tag', 'p"name')
def test_element_name_space(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, ' name ')
self.assertRaises(ValueError, Element, 'na me')
self.assertRaises(ValueError, Element, '{test} name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', ' name ')
def test_subelement_name_empty(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, '{}')
self.assertRaises(ValueError, SubElement, el, '{test}')
def test_subelement_name_colon(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, 'p:name')
self.assertRaises(ValueError, SubElement, el, '{test}p:name')
def test_subelement_name_quote(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, "p'name")
self.assertRaises(ValueError, SubElement, el, "{test}p'name")
self.assertRaises(ValueError, SubElement, el, 'p"name')
self.assertRaises(ValueError, SubElement, el, '{test}p"name')
def test_subelement_name_space(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, ' name ')
self.assertRaises(ValueError, SubElement, el, 'na me')
self.assertRaises(ValueError, SubElement, el, '{test} name')
def test_subelement_attribute_invalid(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, 'name', {'a b c' : 'abc'})
self.assertRaises(ValueError, SubElement, el, 'name', {'a' : 'a\0\n'})
self.assertEqual(0, len(el))
def test_qname_empty(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, '')
self.assertRaises(ValueError, QName, None)
self.assertRaises(ValueError, QName, None, None)
self.assertRaises(ValueError, QName, 'test', '')
def test_qname_none(self):
QName = self.etree.QName
q = QName(None, 'TAG')
self.assertEqual('TAG', q)
self.assertEqual('TAG', q.localname)
self.assertEqual(None, q.namespace)
def test_qname_colon(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, 'p:name')
self.assertRaises(ValueError, QName, 'test', 'p:name')
def test_qname_space(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, ' name ')
self.assertRaises(ValueError, QName, 'na me')
self.assertRaises(ValueError, QName, 'test', ' name')
def test_qname_namespace_localname(self):
# ET doesn't have namespace/localname properties on QNames
QName = self.etree.QName
namespace, localname = 'http://myns', 'a'
qname = QName(namespace, localname)
self.assertEqual(namespace, qname.namespace)
self.assertEqual(localname, qname.localname)
def test_qname_element(self):
# ET doesn't have namespace/localname properties on QNames
QName = self.etree.QName
qname1 = QName('http://myns', 'a')
a = self.etree.Element(qname1, nsmap={'p' : 'http://myns'})
qname2 = QName(a)
self.assertEqual(a.tag, qname1.text)
self.assertEqual(a.tag, qname1)
self.assertEqual(qname1.text, qname2.text)
self.assertEqual(qname1, qname2.text)
self.assertEqual(qname1.text, qname2)
self.assertEqual(qname1, qname2)
def test_qname_text_resolve(self):
# ET doesn't resolve QNames as text values
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element(qname, nsmap={'p' : 'http://myns'})
a.text = qname
self.assertEqual("p:a", a.text)
def test_nsmap_prefix_invalid(self):
etree = self.etree
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'"' : 'testns'})
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'&' : 'testns'})
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'a:b' : 'testns'})
def test_clear_keep_tail(self):
XML = self.etree.XML
tostring = self.etree.tostring
a = XML('<a aa="A"><b ba="B">B1</b>B2<c ca="C">C1</c>C2</a>')
a[0].clear(keep_tail=True)
self.assertEqual(_bytes('<a aa="A"><b/>B2<c ca="C">C1</c>C2</a>'), tostring(a))
def test_attribute_has_key(self):
# ET in Py 3.x has no "attrib.has_key()" method
XML = self.etree.XML
root = XML(_bytes('<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />'))
self.assertEqual(
True, root.attrib.has_key('bar'))
self.assertEqual(
False, root.attrib.has_key('baz'))
self.assertEqual(
False, root.attrib.has_key('hah'))
self.assertEqual(
True,
root.attrib.has_key('{http://ns.codespeak.net/test}baz'))
def test_attribute_set(self):
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.get("attr"))
def test_attribute_set_nonstring(self):
# ElementTree accepts arbitrary attribute values
# lxml.etree allows only strings
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.get("attr"))
self.assertRaises(TypeError, root.set, "newattr", 5)
def test_attrib_and_keywords(self):
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.attrib["attr"])
root2 = Element("root2", root.attrib, attr2='TOAST')
self.assertEqual("TEST", root2.attrib["attr"])
self.assertEqual("TOAST", root2.attrib["attr2"])
self.assertEqual(None, root.attrib.get("attr2"))
def test_attrib_order(self):
Element = self.etree.Element
keys = ["attr%d" % i for i in range(12, 4, -1)]
values = ["TEST-%d" % i for i in range(12, 4, -1)]
items = list(zip(keys, values))
root = Element("root")
for key, value in items:
root.set(key, value)
self.assertEqual(keys, root.attrib.keys())
self.assertEqual(values, root.attrib.values())
attr_order = [
('attr_99', 'TOAST-1'),
('attr_98', 'TOAST-2'),
]
ordered_dict_types = [OrderedDict, lambda x:x]
if sys.version_info >= (3, 6):
ordered_dict_types.append(dict)
else:
# Keyword arguments are not ordered in Py<3.6, and thus get sorted.
attr_order.sort()
attr_order += items
expected_keys = [attr[0] for attr in attr_order]
expected_values = [attr[1] for attr in attr_order]
expected_items = list(zip(expected_keys, expected_values))
for dict_type in ordered_dict_types:
root2 = Element("root2", dict_type(root.attrib),
attr_99='TOAST-1', attr_98='TOAST-2')
try:
self.assertSequenceEqual(expected_keys, root2.attrib.keys())
self.assertSequenceEqual(expected_values, root2.attrib.values())
self.assertSequenceEqual(expected_items, root2.attrib.items())
except AssertionError as exc:
exc.args = ("Order of '%s': %s" % (dict_type.__name__, exc.args[0]),) + exc.args[1:]
raise
self.assertEqual(keys, root.attrib.keys())
self.assertEqual(values, root.attrib.values())
def test_attribute_set_invalid(self):
# ElementTree accepts arbitrary attribute values
# lxml.etree allows only strings, or None for (html5) boolean attributes
Element = self.etree.Element
root = Element("root")
self.assertRaises(TypeError, root.set, "newattr", 5)
self.assertRaises(TypeError, root.set, "newattr", object)
self.assertRaises(TypeError, root.set, "newattr", None)
self.assertRaises(TypeError, root.set, "newattr")
def test_strip_attributes(self):
XML = self.etree.XML
xml = _bytes('<test a="5" b="10" c="20"><x a="4" b="2"/></test>')
root = XML(xml)
self.etree.strip_attributes(root, 'a')
self.assertEqual(_bytes('<test b="10" c="20"><x b="2"></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_attributes(root, 'b', 'c')
self.assertEqual(_bytes('<test a="5"><x a="4"></x></test>'),
self._writeElement(root))
def test_strip_attributes_ns(self):
XML = self.etree.XML
xml = _bytes('<test xmlns:n="http://test/ns" a="6" b="10" c="20" n:a="5"><x a="4" n:b="2"/></test>')
root = XML(xml)
self.etree.strip_attributes(root, 'a')
self.assertEqual(
_bytes('<test xmlns:n="http://test/ns" b="10" c="20" n:a="5"><x n:b="2"></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_attributes(root, '{http://test/ns}a', 'c')
self.assertEqual(
_bytes('<test xmlns:n="http://test/ns" a="6" b="10"><x a="4" n:b="2"></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_attributes(root, '{http://test/ns}*')
self.assertEqual(
_bytes('<test xmlns:n="http://test/ns" a="6" b="10" c="20"><x a="4"></x></test>'),
self._writeElement(root))
def test_strip_elements(self):
XML = self.etree.XML
xml = _bytes('<test><a><b><c/></b></a><x><a><b/><c/></a></x></test>')
root = XML(xml)
self.etree.strip_elements(root, 'a')
self.assertEqual(_bytes('<test><x></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, 'b', 'c', 'X', 'Y', 'Z')
self.assertEqual(_bytes('<test><a></a><x><a></a></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, 'c')
self.assertEqual(_bytes('<test><a><b></b></a><x><a><b></b></a></x></test>'),
self._writeElement(root))
def test_strip_elements_ns(self):
XML = self.etree.XML
xml = _bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"/>C</b>BT</n:a>AT<x>X<a>A<b xmlns="urn:a"/>BT<c xmlns="urn:x"/>CT</a>AT</x>XT</test>')
root = XML(xml)
self.etree.strip_elements(root, 'a')
self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>C</b>BT</n:a>AT<x>X</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, '{urn:a}b', 'c')
self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>C</b>BT</n:a>AT<x>X<a>A<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, '{urn:a}*', 'c')
self.assertEqual(_bytes('<test>TEST<x>X<a>A<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, '{urn:a}*', 'c', with_tail=False)
self.assertEqual(_bytes('<test>TESTAT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
def test_strip_tags(self):
XML = self.etree.XML
xml = _bytes('<test>TEST<a>A<b>B<c/>CT</b>BT</a>AT<x>X<a>A<b/>BT<c/>CT</a>AT</x>XT</test>')
root = XML(xml)
self.etree.strip_tags(root, 'a')
self.assertEqual(_bytes('<test>TESTA<b>B<c></c>CT</b>BTAT<x>XA<b></b>BT<c></c>CTAT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, 'b', 'c', 'X', 'Y', 'Z')
self.assertEqual(_bytes('<test>TEST<a>ABCTBT</a>AT<x>X<a>ABTCT</a>AT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, 'c')
self.assertEqual(_bytes('<test>TEST<a>A<b>BCT</b>BT</a>AT<x>X<a>A<b></b>BTCT</a>AT</x>XT</test>'),
self._writeElement(root))
def test_strip_tags_pi_comment(self):
XML = self.etree.XML
PI = self.etree.ProcessingInstruction
Comment = self.etree.Comment
xml = _bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT<?PI2?></test>\n<!--comment3-->\n<?PI1?>')
root = XML(xml)
self.etree.strip_tags(root, PI)
self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT</test>\n<!--comment3-->\n<?PI1?>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, Comment)
self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT<?PI2?></test>\n<!--comment3-->\n<?PI1?>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, PI, Comment)
self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT</test>\n<!--comment3-->\n<?PI1?>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, Comment, PI)
self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT</test>\n<!--comment3-->\n<?PI1?>'),
self._writeElement(root))
def test_strip_tags_pi_comment_all(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
PI = self.etree.ProcessingInstruction
Comment = self.etree.Comment
xml = _bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT<?PI2?></test>\n<!--comment3-->\n<?PI1?>')
root = XML(xml)
self.etree.strip_tags(ElementTree(root), PI)
self.assertEqual(_bytes('<!--comment1-->\n<test>TEST<!--comment2-->XT</test>\n<!--comment3-->'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(ElementTree(root), Comment)
self.assertEqual(_bytes('<?PI1?>\n<test>TESTXT<?PI2?></test>\n<?PI1?>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(ElementTree(root), PI, Comment)
self.assertEqual(_bytes('<test>TESTXT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(ElementTree(root), Comment, PI)
self.assertEqual(_bytes('<test>TESTXT</test>'),
self._writeElement(root))
def test_strip_tags_doc_style(self):
XML = self.etree.XML
xml = _bytes('''
<div>
<div>
I like <strong>sheep</strong>.
<br/>
I like lots of <strong>sheep</strong>.
<br/>
Click <a href="http://www.sheep.com">here</a>
for <a href="http://www.sheep.com">those</a> sheep.
<br/>
</div>
</div>
'''.strip())
root = XML(xml)
self.etree.strip_tags(root, 'a')
self.assertEqual(re.sub(_bytes('</?a[^>]*>'), _bytes(''), xml).replace(_bytes('<br/>'), _bytes('<br></br>')),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, 'a', 'br')
self.assertEqual(re.sub(_bytes('</?a[^>]*>'), _bytes(''),
re.sub(_bytes('<br[^>]*>'), _bytes(''), xml)),
self._writeElement(root))
def test_strip_tags_ns(self):
XML = self.etree.XML
xml = _bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"/>CT</b>BT</n:a>AT<x>X<a>A<b xmlns="urn:a"/>BT<c xmlns="urn:x"/>CT</a>AT</x>XT</test>')
root = XML(xml)
self.etree.strip_tags(root, 'a')
self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>CT</b>BT</n:a>AT<x>XA<b xmlns="urn:a"></b>BT<c xmlns="urn:x"></c>CTAT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, '{urn:a}b', 'c')
self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>CT</b>BT</n:a>AT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, '{urn:a}*', 'c')
self.assertEqual(_bytes('<test>TESTA<b>B<c xmlns="urn:c"></c>CT</b>BTAT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
def test_strip_tags_and_remove(self):
# previously crashed
HTML = self.etree.HTML
root = HTML(_bytes('<div><h1>title</h1> <b>foo</b> <p>boo</p></div>'))[0][0]
self.assertEqual(_bytes('<div><h1>title</h1> <b>foo</b> <p>boo</p></div>'),
self.etree.tostring(root))
self.etree.strip_tags(root, 'b')
self.assertEqual(_bytes('<div><h1>title</h1> foo <p>boo</p></div>'),
self.etree.tostring(root))
root.remove(root[0])
self.assertEqual(_bytes('<div><p>boo</p></div>'),
self.etree.tostring(root))
def test_pi(self):
# lxml.etree separates target and text
Element = self.etree.Element
SubElement = self.etree.SubElement
ProcessingInstruction = self.etree.ProcessingInstruction
a = Element('a')
a.append(ProcessingInstruction('foo', 'some more text'))
self.assertEqual(a[0].target, 'foo')
self.assertEqual(a[0].text, 'some more text')
def test_pi_parse(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my test ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].text, "my test ")
def test_pi_pseudo_attributes_get(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my='1' test=\" abc \" quotes=\"' '\" only names ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].get('my'), "1")
self.assertEqual(root[0].get('test'), " abc ")
self.assertEqual(root[0].get('quotes'), "' '")
self.assertEqual(root[0].get('only'), None)
self.assertEqual(root[0].get('names'), None)
self.assertEqual(root[0].get('nope'), None)
def test_pi_pseudo_attributes_attrib(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my='1' test=\" abc \" quotes=\"' '\" only names ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].attrib['my'], "1")
self.assertEqual(root[0].attrib['test'], " abc ")
self.assertEqual(root[0].attrib['quotes'], "' '")
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'only')
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'names')
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'nope')
def test_deepcopy_pi(self):
# previously caused a crash
ProcessingInstruction = self.etree.ProcessingInstruction
a = ProcessingInstruction("PI", "ONE")
b = copy.deepcopy(a)
b.text = "ANOTHER"
self.assertEqual('ONE', a.text)
self.assertEqual('ANOTHER', b.text)
def test_deepcopy_elementtree_pi(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(_bytes("<?mypi my test ?><test/><!--comment -->"))
tree1 = self.etree.ElementTree(root)
self.assertEqual(_bytes("<?mypi my test ?><test/><!--comment -->"),
tostring(tree1))
tree2 = copy.deepcopy(tree1)
self.assertEqual(_bytes("<?mypi my test ?><test/><!--comment -->"),
tostring(tree2))
root2 = copy.deepcopy(tree1.getroot())
self.assertEqual(_bytes("<test/>"),
tostring(root2))
def test_deepcopy_elementtree_dtd(self):
XML = self.etree.XML
tostring = self.etree.tostring
xml = _bytes('<!DOCTYPE test [\n<!ENTITY entity "tasty">\n]>\n<test/>')
root = XML(xml)
tree1 = self.etree.ElementTree(root)
self.assertEqual(xml, tostring(tree1))
tree2 = copy.deepcopy(tree1)
self.assertEqual(xml, tostring(tree2))
root2 = copy.deepcopy(tree1.getroot())
self.assertEqual(_bytes("<test/>"),
tostring(root2))
def test_deepcopy_pi_dtd(self):
XML = self.etree.XML
tostring = self.etree.tostring
xml = _bytes('<!-- comment --><!DOCTYPE test [\n<!ENTITY entity "tasty">\n]>\n<test/>')
root = XML(xml)
tree1 = self.etree.ElementTree(root)
self.assertEqual(xml, tostring(tree1))
tree2 = copy.deepcopy(tree1)
self.assertEqual(xml, tostring(tree2))
def test_parse_remove_comments(self):
fromstring = self.etree.fromstring
tostring = self.etree.tostring
XMLParser = self.etree.XMLParser
xml = _bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
parser = XMLParser(remove_comments=True)
root = fromstring(xml, parser)
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(root))
def test_parse_remove_pis(self):
parse = self.etree.parse
tostring = self.etree.tostring
XMLParser = self.etree.XMLParser
xml = _bytes('<?test?><a><?A?><b><?B?><c/></b><?C?></a><?tail?>')
f = BytesIO(xml)
tree = parse(f)
self.assertEqual(
xml,
tostring(tree))
parser = XMLParser(remove_pis=True)
tree = parse(f, parser)
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(tree))
def test_parse_parser_type_error(self):
# ET raises IOError only
parse = self.etree.parse
self.assertRaises(TypeError, parse, 'notthere.xml', object())
def test_iterparse_getiterator(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
counts = []
for event, elem in iterparse(f):
counts.append(len(list(elem.getiterator())))
self.assertEqual(
[1,2,1,4],
counts)
def test_iterparse_tree_comments(self):
# ET removes comments
iterparse = self.etree.iterparse
tostring = self.etree.tostring
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f))
root = events[-1][1]
self.assertEqual(3, len(events))
self.assertEqual(
_bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>'),
tostring(root))
def test_iterparse_comments(self):
# ET removes comments
iterparse = self.etree.iterparse
tostring = self.etree.tostring
def name(event, el):
if event == 'comment':
return el.text
else:
return el.tag
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f, events=('end', 'comment')))
root = events[-1][1]
self.assertEqual(6, len(events))
self.assertEqual(['A', ' B ', 'c', 'b', 'C', 'a'],
[ name(*item) for item in events ])
self.assertEqual(
_bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>'),
tostring(root))
def test_iterparse_pis(self):
# ET removes pis
iterparse = self.etree.iterparse
tostring = self.etree.tostring
ElementTree = self.etree.ElementTree
def name(event, el):
if event == 'pi':
return el.target, el.text
else:
return el.tag
f = BytesIO('<?pia a?><a><?pib b?><b><?pic c?><c/></b><?pid d?></a><?pie e?>')
events = list(iterparse(f, events=('end', 'pi')))
root = events[-2][1]
self.assertEqual(8, len(events))
self.assertEqual([('pia','a'), ('pib','b'), ('pic','c'), 'c', 'b',
('pid','d'), 'a', ('pie','e')],
[ name(*item) for item in events ])
self.assertEqual(
_bytes('<?pia a?><a><?pib b?><b><?pic c?><c/></b><?pid d?></a><?pie e?>'),
tostring(ElementTree(root)))
def test_iterparse_remove_comments(self):
iterparse = self.etree.iterparse
tostring = self.etree.tostring
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f, remove_comments=True,
events=('end', 'comment')))
root = events[-1][1]
self.assertEqual(3, len(events))
self.assertEqual(['c', 'b', 'a'],
[ el.tag for (event, el) in events ])
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(root))
def test_iterparse_broken(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><c/></a>')
# ET raises ExpatError, lxml raises XMLSyntaxError
self.assertRaises(self.etree.XMLSyntaxError, list, iterparse(f))
def test_iterparse_broken_recover(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><c/></a>')
it = iterparse(f, events=('start', 'end'), recover=True)
events = [(ev, el.tag) for ev, el in it]
root = it.root
self.assertTrue(root is not None)
self.assertEqual(1, events.count(('start', 'a')))
self.assertEqual(1, events.count(('end', 'a')))
self.assertEqual(1, events.count(('start', 'b')))
self.assertEqual(1, events.count(('end', 'b')))
self.assertEqual(1, events.count(('start', 'c')))
self.assertEqual(1, events.count(('end', 'c')))
def test_iterparse_broken_multi_recover(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><c/></d><b><c/></a></b>')
it = iterparse(f, events=('start', 'end'), recover=True)
events = [(ev, el.tag) for ev, el in it]
root = it.root
self.assertTrue(root is not None)
self.assertEqual(1, events.count(('start', 'a')))
self.assertEqual(1, events.count(('end', 'a')))
self.assertEqual(2, events.count(('start', 'b')))
self.assertEqual(2, events.count(('end', 'b')))
self.assertEqual(2, events.count(('start', 'c')))
self.assertEqual(2, events.count(('end', 'c')))
def test_iterparse_strip(self):
iterparse = self.etree.iterparse
f = BytesIO("""
<a> \n \n <b> b test </b> \n
\n\t <c> \n </c> </a> \n """)
iterator = iterparse(f, remove_blank_text=True)
text = [ (element.text, element.tail)
for event, element in iterator ]
self.assertEqual(
[(" b test ", None), (" \n ", None), (None, None)],
text)
def test_iterparse_tag(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterparse_tag_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
8,
len(events))
def test_iterparse_tag_ns(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{urn:test:1}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterparse_tag_ns_empty(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual([], events)
def test_iterparse_tag_ns_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{urn:test:1}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(8, len(events))
def test_iterparse_tag_ns_empty_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual([], events)
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(8, len(events))
def test_iterparse_encoding_error(self):
text = _str('Søk på nettet')
wrong_declaration = "<?xml version='1.0' encoding='UTF-8'?>"
xml_latin1 = (_str('%s<a>%s</a>') % (wrong_declaration, text)
).encode('iso-8859-1')
self.assertRaises(self.etree.ParseError,
list, self.etree.iterparse(BytesIO(xml_latin1)))
def test_iterparse_encoding_8bit_override(self):
text = _str('Søk på nettet', encoding="UTF-8")
wrong_declaration = "<?xml version='1.0' encoding='UTF-8'?>"
xml_latin1 = (_str('%s<a>%s</a>') % (wrong_declaration, text)
).encode('iso-8859-1')
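# an explicit encoding argument overrides the (incorrect) encoding given in the XML declaration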
iterator = self.etree.iterparse(BytesIO(xml_latin1),
encoding="iso-8859-1")
self.assertEqual(1, len(list(iterator)))
a = iterator.root
self.assertEqual(a.text, text)
def test_iterparse_keep_cdata(self):
tostring = self.etree.tostring
f = BytesIO('<root><![CDATA[test]]></root>')
context = self.etree.iterparse(f, strip_cdata=False)
content = [ el.text for event,el in context ]
self.assertEqual(['test'], content)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(context.root))
def test_parser_encoding_unknown(self):
self.assertRaises(
LookupError, self.etree.XMLParser, encoding="hopefully unknown")
def test_parser_encoding(self):
self.etree.XMLParser(encoding="ascii")
self.etree.XMLParser(encoding="utf-8")
self.etree.XMLParser(encoding="iso-8859-1")
def test_feed_parser_recover(self):
parser = self.etree.XMLParser(recover=True)
parser.feed('<?xml version=')
parser.feed('"1.0"?><ro')
parser.feed('ot><')
parser.feed('a test="works"')
parser.feed('><othertag/></root') # <a> not closed!
parser.feed('>')
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, "a")
self.assertEqual(root[0].get("test"), "works")
self.assertEqual(len(root[0]), 1)
self.assertEqual(root[0][0].tag, "othertag")
# FIXME: would be nice to get some errors logged ...
#self.assertTrue(len(parser.error_log) > 0, "error log is empty")
def test_feed_parser_recover_no_id_dict(self):
# test that recover mode plays nicely with the no-id-dict setup
parser = self.etree.XMLParser(recover=True, collect_ids=False)
parser.feed('<?xml version=')
parser.feed('"1.0"?><ro')
parser.feed('ot xml:id="123"><')
parser.feed('a test="works" xml:id=')
parser.feed('"321"><othertag/></root') # <a> not closed!
parser.feed('>')
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, "a")
self.assertEqual(root[0].get("test"), "works")
self.assertEqual(root[0].attrib, {
'test': 'works',
'{http://www.w3.org/XML/1998/namespace}id': '321'})
self.assertEqual(len(root[0]), 1)
self.assertEqual(root[0][0].tag, "othertag")
# FIXME: would be nice to get some errors logged ...
#self.assertTrue(len(parser.error_log) > 0, "error log is empty")
def test_elementtree_parser_target_type_error(self):
assertEqual = self.assertEqual
assertFalse = self.assertFalse
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertFalse(attrib)
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return "DONE" # no Element!
parser = self.etree.XMLParser(target=Target())
tree = self.etree.ElementTree()
self.assertRaises(TypeError,
tree.parse, BytesIO("<TAG/>"), parser=parser)
self.assertEqual(["start", "end"], events)
def test_parser_target_feed_exception(self):
# ET doesn't call .close() on errors
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
if tag == 'a':
raise ValueError("dead and gone")
def data(self, data):
events.append("data-" + data)
def close(self):
events.append("close")
return "DONE"
parser = self.etree.XMLParser(target=Target())
try:
parser.feed(_bytes('<root>A<a>ca</a>B</root>'))
done = parser.close()
self.fail("error expected, but parsing succeeded")
except ValueError:
done = 'value error received as expected'
self.assertEqual(["start-root", "data-A", "start-a",
"data-ca", "end-a", "close"],
events)
def test_parser_target_fromstring_exception(self):
# ET doesn't call .close() on errors
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
if tag == 'a':
raise ValueError("dead and gone")
def data(self, data):
events.append("data-" + data)
def close(self):
events.append("close")
return "DONE"
parser = self.etree.XMLParser(target=Target())
try:
done = self.etree.fromstring(_bytes('<root>A<a>ca</a>B</root>'),
parser=parser)
self.fail("error expected, but parsing succeeded")
except ValueError:
done = 'value error received as expected'
self.assertEqual(["start-root", "data-A", "start-a",
"data-ca", "end-a", "close"],
events)
def test_parser_target_feed_no_id_dict(self):
# test that target parsing works nicely with the no-id-dict setup
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def comment(self, text):
events.append("comment-" + text)
def close(self):
return "DONE"
parser = self.etree.XMLParser(target=Target(), collect_ids=False)
parser.feed(_bytes('<!--a--><root xml:id="123">A<!--b-->'))
parser.feed(_bytes('<sub xml:id="321"/>B</root>'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["comment-a", "start-root", "data-A", "comment-b",
"start-sub", "end-sub", "data-B", "end-root"],
events)
def test_parser_target_comment(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def comment(self, text):
events.append("comment-" + text)
def close(self):
return "DONE"
parser = self.etree.XMLParser(target=Target())
parser.feed(_bytes('<!--a--><root>A<!--b--><sub/><!--c-->B</root><!--d-->'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["comment-a", "start-root", "data-A", "comment-b",
"start-sub", "end-sub", "comment-c", "data-B",
"end-root", "comment-d"],
events)
def test_parser_target_pi(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def pi(self, target, data):
events.append("pi-" + target + "-" + data)
def close(self):
return "DONE"
parser = self.etree.XMLParser(target=Target())
parser.feed(_bytes('<?test a?><root>A<?test b?>B</root><?test c?>'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["pi-test-a", "start-root", "data-A", "pi-test-b",
"data-B", "end-root", "pi-test-c"],
events)
def test_parser_target_cdata(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def close(self):
return "DONE"
parser = self.etree.XMLParser(target=Target(),
strip_cdata=False)
parser.feed(_bytes('<root>A<a><![CDATA[ca]]></a>B</root>'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "data-A", "start-a",
"data-ca", "end-a", "data-B", "end-root"],
events)
def test_parser_target_recover(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def close(self):
events.append("close")
return "DONE"
parser = self.etree.XMLParser(target=Target(),
recover=True)
parser.feed(_bytes('<root>A<a>ca</a>B</not-root>'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "data-A", "start-a",
"data-ca", "end-a", "data-B",
"end-root", "close"],
events)
def test_iterwalk_tag(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
iterator = iterwalk(root, tag="b", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterwalk_tag_all(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
iterator = iterwalk(root, tag="*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
8,
len(events))
def test_iterwalk(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
events = list(iterwalk(root))
self.assertEqual(
[('end', root[0]), ('end', root[1]), ('end', root)],
events)
def test_iterwalk_comments_root_element(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(
b'<!--C0--><a><!--Ca--><b><!--Cb--></b><!--Cc--><c/></a><!--C99-->')
iterator = iterwalk(root, events=('start', 'end', 'comment'))
events = list(iterator)
self.assertEqual(
[('start', root), ('comment', root[0]),
('start', root[1]), ('comment', root[1][0]), ('end', root[1]),
('comment', root[2]), ('start', root[3]), ('end', root[3]),
('end', root),
],
events)
def test_iterwalk_comments_tree(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(
b'<!--C0--><a><!--Ca--><b><!--Cb--></b><!--Cc--><c/></a><!--C99-->')
iterator = iterwalk(self.etree.ElementTree(root), events=('start', 'end', 'comment'))
events = list(iterator)
self.assertEqual(
[('comment', root.getprevious()),
('start', root), ('comment', root[0]), # <a>
('start', root[1]), ('comment', root[1][0]), ('end', root[1]), # <b>
('comment', root[2]), ('start', root[3]), ('end', root[3]), # <c>
('end', root), ('comment', root.getnext()),
],
events)
def test_iterwalk_pis_root_element(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(
b'<?C0?><a><?Ca?><b><?Cb?></b><?Cc?><c/></a><?C99?>')
iterator = iterwalk(root, events=('start', 'end', 'pi'))
events = list(iterator)
self.assertEqual(
[('start', root), ('pi', root[0]),
('start', root[1]), ('pi', root[1][0]), ('end', root[1]),
('pi', root[2]), ('start', root[3]), ('end', root[3]),
('end', root),
],
events)
def test_iterwalk_pis_tree(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(
b'<?C0?><a><?Ca?><b><?Cb?></b><?Cc?><c/></a><?C99?>')
iterator = iterwalk(self.etree.ElementTree(root), events=('start', 'end', 'pi'))
events = list(iterator)
self.assertEqual(
[('pi', root.getprevious()),
('start', root), ('pi', root[0]), # <a>
('start', root[1]), ('pi', root[1][0]), ('end', root[1]), # <b>
('pi', root[2]), ('start', root[3]), ('end', root[3]), # <c>
('end', root), ('pi', root.getnext()),
],
events)
def test_iterwalk_pis_comments_tree(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(
b'<!--C0--><?C0?><!--C1--><a><?Ca?><b><!--Cb--></b><?Cc?><c/></a><!--C99--><?C99?>')
iterator = iterwalk(self.etree.ElementTree(root), events=('start', 'end', 'pi', 'comment'))
events = list(iterator)
self.assertEqual(
[('comment', root.getprevious().getprevious().getprevious()),
('pi', root.getprevious().getprevious()),
('comment', root.getprevious()),
('start', root), ('pi', root[0]), # <a>
('start', root[1]), ('comment', root[1][0]), ('end', root[1]), # <b>
('pi', root[2]), ('start', root[3]), ('end', root[3]), # <c>
('end', root), ('comment', root.getnext()), ('pi', root.getnext().getnext()),
],
events)
def test_iterwalk_pis_comments_tree_no_events(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(
b'<!--C0--><?C0?><!--C1--><a><?Ca?><b><!--Cb--></b><?Cc?><c/></a><!--C99--><?C99?>')
iterator = iterwalk(self.etree.ElementTree(root), events=('start', 'end'))
events = list(iterator)
self.assertEqual(
[('start', root), # <a>
('start', root[1]), ('end', root[1]), # <b>
('start', root[3]), ('end', root[3]), # <c>
('end', root),
],
events)
def test_iterwalk_start(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root, events=('start',))
events = list(iterator)
self.assertEqual(
[('start', root), ('start', root[0]), ('start', root[1])],
events)
def test_iterwalk_start_end(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root, events=('start','end'))
events = list(iterator)
self.assertEqual(
[('start', root), ('start', root[0]), ('end', root[0]),
('start', root[1]), ('end', root[1]), ('end', root)],
events)
def test_iterwalk_start_tags(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/><b><d/></b></a>'))
iterator = iterwalk(root, events=('start',), tag='b')
events = list(iterator)
self.assertEqual(
[('start', root[0]), ('start', root[2])],
events)
def test_iterwalk_start_end_tags(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/><b><d/></b></a>'))
iterator = iterwalk(root, events=('start', 'end'), tag='b')
events = list(iterator)
self.assertEqual(
[('start', root[0]), ('end', root[0]), ('start', root[2]), ('end', root[2])],
events)
def test_iterwalk_start_end_tags_with_root(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/><b><d/></b></a>'))
iterator = iterwalk(root, events=('start', 'end'), tag=('b', 'a'))
events = list(iterator)
self.assertEqual(
[('start', root),
('start', root[0]), ('end', root[0]),
('start', root[2]), ('end', root[2]),
('end', root),
],
events)
def test_iterwalk_clear(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root)
for event, elem in iterator:
elem.clear()
self.assertEqual(0,
len(root))
def test_iterwalk_attrib_ns(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a xmlns="ns1"><b><c xmlns="ns2"/></b></a>'))
attr_name = '{testns}bla'
events = []
iterator = iterwalk(root, events=('start','end','start-ns','end-ns'))
for event, elem in iterator:
events.append(event)
if event == 'start':
if elem.tag != '{ns1}a':
elem.set(attr_name, 'value')
self.assertEqual(
['start-ns', 'start', 'start', 'start-ns', 'start',
'end', 'end-ns', 'end', 'end', 'end-ns'],
events)
self.assertEqual(
None,
root.get(attr_name))
self.assertEqual(
'value',
root[0].get(attr_name))
def test_iterwalk_end_skip(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><c/></b><d><e/></d></a>'))
iterator = iterwalk(root)
tags = []
for event, elem in iterator:
tags.append(elem.tag)
# requesting a skip after an 'end' event should never have an effect
iterator.skip_subtree()
self.assertEqual(['c', 'b', 'e', 'd', 'a'], tags)
def test_iterwalk_start_end_skip(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><c/></b><d><e/></d></a>'))
iterator = iterwalk(root, events=('start', 'end'))
tags = []
for event, elem in iterator:
tags.append((event, elem.tag))
if elem.tag in ('b', 'e'):
# skipping should only have an effect on 'start', not on 'end'
iterator.skip_subtree()
self.assertEqual(
[('start', 'a'),
('start', 'b'), ('end', 'b'), # ignored child 'c'
('start', 'd'),
('start', 'e'), ('end', 'e'),
('end', 'd'),
('end', 'a')],
tags)
def test_iterwalk_ns_skip(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes(
'<a xmlns="ns1"><b xmlns="nsb"><c xmlns="ns2"/></b><d xmlns="ns2"><e/></d></a>'))
events = []
iterator = iterwalk(root, events=('start','start-ns','end-ns'))
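# 'start-ns' events carry a (prefix, uri) tuple, 'end-ns' events carry None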
for event, elem in iterator:
if event in ('start-ns', 'end-ns'):
events.append((event, elem))
if event == 'start-ns' and elem == ('', 'nsb'):
events.append('skip')
iterator.skip_subtree()
else:
events.append((event, elem.tag))
self.assertEqual(
[('start-ns', ('', 'ns1')),
('start', '{ns1}a'),
('start-ns', ('', 'nsb')),
'skip',
('start', '{nsb}b'),
('end-ns', None),
('start-ns', ('', 'ns2')),
('start', '{ns2}d'),
('start', '{ns2}e'),
('end-ns', None),
('end-ns', None)
],
events)
def test_iterwalk_getiterator(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
counts = []
for event, elem in iterwalk(root):
counts.append(len(list(elem.getiterator())))
self.assertEqual(
[1,2,1,4],
counts)
def test_itertext_comment_pi(self):
# https://bugs.launchpad.net/lxml/+bug/1844674
XML = self.etree.XML
root = XML(_bytes(
"<root>RTEXT<a></a>ATAIL<b/><!-- COMMENT -->CTAIL<?PI PITEXT?> PITAIL </root>"
))
text = list(root.itertext())
self.assertEqual(["RTEXT", "ATAIL", "CTAIL", " PITAIL "],
text)
def test_resolve_string_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_string(
_str('''<!ENTITY myentity "%s">
<!ELEMENT doc ANY>''') % url, context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(root.text, test_url)
def test_resolve_bytes_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_string(
(_str('''<!ENTITY myentity "%s">
<!ELEMENT doc ANY>''') % url).encode('utf-8'),
context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(root.text, test_url)
def test_resolve_filelike_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_file(
SillyFileLike(
_str('''<!ENTITY myentity "%s">
<!ELEMENT doc ANY>''') % url), context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(root.text, test_url)
def test_resolve_filename_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(attribute_defaults=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_filename(
fileInTestDir('test.dtd'), context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(
root.attrib, {'default': 'valueA'})
self.assertEqual(
root[0].attrib, {'default': 'valueB'})
def test_resolve_filename_dtd_relative(self):
parse = self.etree.parse
parser = self.etree.XMLParser(attribute_defaults=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
expected = fileUrlInTestDir(test_url)
url = url.replace('file://', 'file:') # depends on libxml2 version
expected = expected.replace('file://', 'file:')
assertEqual(url, expected)
return self.resolve_filename(
fileUrlInTestDir('test.dtd'), context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
tree = parse(StringIO(xml), parser,
base_url=fileUrlInTestDir('__test.xml'))
root = tree.getroot()
self.assertEqual(
root.attrib, {'default': 'valueA'})
self.assertEqual(
root[0].attrib, {'default': 'valueB'})
def test_resolve_file_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(attribute_defaults=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_file(
open(fileInTestDir('test.dtd'), 'rb'), context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(
root.attrib, {'default': 'valueA'})
self.assertEqual(
root[0].attrib, {'default': 'valueB'})
def test_resolve_empty(self):
parse = self.etree.parse
parser = self.etree.XMLParser(load_dtd=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class check(object):
resolved = False
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
check.resolved = True
return self.resolve_empty(context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
self.assertRaises(etree.XMLSyntaxError, parse, StringIO(xml), parser)
self.assertTrue(check.resolved)
def test_resolve_error(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
class _LocalException(Exception):
pass
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
raise _LocalException
parser.resolvers.add(MyResolver())
xml = '<!DOCTYPE doc SYSTEM "test"><doc>&myentity;</doc>'
self.assertRaises(_LocalException, parse, BytesIO(xml), parser)
def test_entity_parse(self):
parse = self.etree.parse
tostring = self.etree.tostring
parser = self.etree.XMLParser(resolve_entities=False)
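# resolve_entities=False keeps the reference as an Entity node instead of expanding it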
Entity = self.etree.Entity
xml = _bytes('<!DOCTYPE doc SYSTEM "test"><doc>&myentity;</doc>')
tree = parse(BytesIO(xml), parser)
root = tree.getroot()
self.assertEqual(root[0].tag, Entity)
self.assertEqual(root[0].text, "&myentity;")
self.assertEqual(root[0].tail, None)
self.assertEqual(root[0].name, "myentity")
self.assertEqual(_bytes('<doc>&myentity;</doc>'),
tostring(root))
def test_entity_restructure(self):
xml = _bytes('''<!DOCTYPE root [ <!ENTITY nbsp " "> ]>
<root>
<child1/>
<child2/>
<child3> </child3>
</root>''')
parser = self.etree.XMLParser(resolve_entities=False)
root = etree.fromstring(xml, parser)
self.assertEqual([ el.tag for el in root ],
['child1', 'child2', 'child3'])
root[0] = root[-1]
self.assertEqual([ el.tag for el in root ],
['child3', 'child2'])
self.assertEqual(root[0][0].text, ' ')
self.assertEqual(root[0][0].name, 'nbsp')
def test_entity_append(self):
Entity = self.etree.Entity
Element = self.etree.Element
tostring = self.etree.tostring
root = Element("root")
root.append( Entity("test") )
self.assertEqual(root[0].tag, Entity)
self.assertEqual(root[0].text, "&test;")
self.assertEqual(root[0].tail, None)
self.assertEqual(root[0].name, "test")
self.assertEqual(_bytes('<root>&test;</root>'),
tostring(root))
def test_entity_append_parsed(self):
Entity = self.etree.Entity
Element = self.etree.Element
parser = self.etree.XMLParser(resolve_entities=False)
entity = self.etree.XML('''<!DOCTYPE data [
<!ENTITY a "a">
<!ENTITY b "&a;">
]>
<data>&b;</data>
''', parser)
el = Element('test')
el.append(entity)
self.assertEqual(el.tag, 'test')
self.assertEqual(el[0].tag, 'data')
self.assertEqual(el[0][0].tag, Entity)
self.assertEqual(el[0][0].name, 'b')
def test_entity_values(self):
Entity = self.etree.Entity
self.assertEqual(Entity("test").text, '&test;')
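# '#NNN' and '#xHHH' are decimal/hexadecimal character references; .text is the referenced character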
self.assertEqual(Entity("#17683").text, '䔓')
self.assertEqual(Entity("#x1768").text, 'ᝨ')
self.assertEqual(Entity("#x98AF").text, '颯')
def test_entity_error(self):
Entity = self.etree.Entity
self.assertRaises(ValueError, Entity, 'a b c')
self.assertRaises(ValueError, Entity, 'a,b')
self.assertRaises(ValueError, Entity, 'a\0b')
self.assertRaises(ValueError, Entity, '#abc')
self.assertRaises(ValueError, Entity, '#xxyz')
def test_cdata(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
tostring = self.etree.tostring
root = Element("root")
root.text = CDATA('test')
self.assertEqual('test',
root.text)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
def test_cdata_tail(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
root = Element("root")
child = SubElement(root, 'child')
child.tail = CDATA('test')
self.assertEqual('test', child.tail)
self.assertEqual(_bytes('<root><child/><![CDATA[test]]></root>'),
tostring(root))
root = Element("root")
root.tail = CDATA('test')
self.assertEqual('test', root.tail)
self.assertEqual(_bytes('<root/><![CDATA[test]]>'),
tostring(root))
def test_cdata_type(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
root = Element("root")
root.text = CDATA("test")
self.assertEqual('test', root.text)
root.text = CDATA(_str("test"))
self.assertEqual('test', root.text)
self.assertRaises(TypeError, CDATA, 1)
def test_cdata_errors(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
root = Element("root")
cdata = CDATA('test')
self.assertRaises(TypeError,
root.set, 'attr', cdata)
self.assertRaises(TypeError,
operator.setitem, root.attrib, 'attr', cdata)
def test_cdata_parser(self):
tostring = self.etree.tostring
parser = self.etree.XMLParser(strip_cdata=False)
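# strip_cdata=False preserves the CDATA section so it survives serialization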
root = self.etree.XML(_bytes('<root><![CDATA[test]]></root>'), parser)
self.assertEqual('test', root.text)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
def test_cdata_xpath(self):
tostring = self.etree.tostring
parser = self.etree.XMLParser(strip_cdata=False)
root = self.etree.XML(_bytes('<root><![CDATA[test]]></root>'), parser)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
self.assertEqual(['test'], root.xpath('//text()'))
# TypeError in lxml.etree, AssertionError in ElementTree
def test_setitem_assert(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
self.assertRaises(TypeError,
a.__setitem__, 0, 'foo')
def test_append_error(self):
Element = self.etree.Element
root = Element('root')
# raises AssertionError in ElementTree
self.assertRaises(TypeError, root.append, None)
self.assertRaises(TypeError, root.extend, [None])
self.assertRaises(TypeError, root.extend, [Element('one'), None])
self.assertEqual('one', root[0].tag)
def test_append_recursive_error(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
self.assertRaises(ValueError, root.append, root)
child = SubElement(root, 'child')
self.assertRaises(ValueError, child.append, root)
child2 = SubElement(child, 'child2')
self.assertRaises(ValueError, child2.append, root)
self.assertRaises(ValueError, child2.append, child)
self.assertEqual('child2', root[0][0].tag)
def test_addnext(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
SubElement(root, 'a')
SubElement(root, 'b')
self.assertEqual(['a', 'b'],
[c.tag for c in root])
root[1].addnext(root[0])
self.assertEqual(['b', 'a'],
[c.tag for c in root])
def test_addprevious(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
SubElement(root, 'a')
SubElement(root, 'b')
self.assertEqual(['a', 'b'],
[c.tag for c in root])
root[0].addprevious(root[1])
self.assertEqual(['b', 'a'],
[c.tag for c in root])
def test_addnext_cycle(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(a, 'b')
# appending parent as sibling is forbidden
self.assertRaises(ValueError, b.addnext, a)
self.assertEqual(['a'], [c.tag for c in root])
self.assertEqual(['b'], [c.tag for c in a])
def test_addprevious_cycle(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(a, 'b')
# appending parent as sibling is forbidden
self.assertRaises(ValueError, b.addprevious, a)
self.assertEqual(['a'], [c.tag for c in root])
self.assertEqual(['b'], [c.tag for c in a])
def test_addnext_cycle_long(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(a, 'b')
c = SubElement(b, 'c')
# adding an ancestor as a sibling is forbidden
self.assertRaises(ValueError, c.addnext, a)
def test_addprevious_cycle_long(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(a, 'b')
c = SubElement(b, 'c')
# adding an ancestor as a sibling is forbidden
self.assertRaises(ValueError, c.addprevious, a)
def test_addprevious_noops(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(root, 'b')
a.addprevious(a)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
b.addprevious(b)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
b.addprevious(a)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
def test_addnext_noops(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(root, 'b')
a.addnext(a)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
b.addnext(b)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
a.addnext(b)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
def test_addnext_root(self):
Element = self.etree.Element
a = Element('a')
b = Element('b')
self.assertRaises(TypeError, a.addnext, b)
def test_addprevious_pi(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
PI = self.etree.PI
root = Element('root')
SubElement(root, 'a')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addprevious(pi)
self.assertEqual(_bytes('<root><?TARGET TEXT?>TAIL<a></a></root>'),
self._writeElement(root))
def test_addprevious_root_pi(self):
Element = self.etree.Element
PI = self.etree.PI
root = Element('root')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addprevious(pi)
self.assertEqual(_bytes('<?TARGET TEXT?>\n<root></root>'),
self._writeElement(root))
def test_addnext_pi(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
PI = self.etree.PI
root = Element('root')
SubElement(root, 'a')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addnext(pi)
self.assertEqual(_bytes('<root><a></a><?TARGET TEXT?>TAIL</root>'),
self._writeElement(root))
def test_addnext_root_pi(self):
Element = self.etree.Element
PI = self.etree.PI
root = Element('root')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addnext(pi)
self.assertEqual(_bytes('<root></root>\n<?TARGET TEXT?>'),
self._writeElement(root))
def test_addnext_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
root = Element('root')
SubElement(root, 'a')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addnext(comment)
self.assertEqual(_bytes('<root><a></a><!--TEXT -->TAIL</root>'),
self._writeElement(root))
def test_addnext_root_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
root = Element('root')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addnext(comment)
self.assertEqual(_bytes('<root></root>\n<!--TEXT -->'),
self._writeElement(root))
def test_addprevious_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
root = Element('root')
SubElement(root, 'a')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addprevious(comment)
self.assertEqual(_bytes('<root><!--TEXT -->TAIL<a></a></root>'),
self._writeElement(root))
def test_addprevious_root_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
root = Element('root')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addprevious(comment)
self.assertEqual(_bytes('<!--TEXT -->\n<root></root>'),
self._writeElement(root))
# ET's Elements have items() and keys(), but not values()
def test_attribute_values(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
values = root.values()
values.sort()
self.assertEqual(['Alpha', 'Beta', 'Gamma'], values)
# gives error in ElementTree
def test_comment_empty(self):
Element = self.etree.Element
Comment = self.etree.Comment
a = Element('a')
a.append(Comment())
self.assertEqual(
_bytes('<a><!----></a>'),
self._writeElement(a))
# ElementTree ignores comments
def test_comment_parse_empty(self):
ElementTree = self.etree.ElementTree
tostring = self.etree.tostring
xml = _bytes('<a><b/><!----><c/></a>')
f = BytesIO(xml)
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
'',
a[1].text)
self.assertEqual(
xml,
tostring(a))
# ElementTree ignores comments
def test_comment_no_proxy_yet(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a><b></b><!-- hoi --><c></c></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
' hoi ',
a[1].text)
# does not raise an exception in ElementTree
def test_comment_immutable(self):
Element = self.etree.Element
Comment = self.etree.Comment
c = Comment()
el = Element('myel')
self.assertRaises(TypeError, c.append, el)
self.assertRaises(TypeError, c.insert, 0, el)
self.assertRaises(TypeError, c.set, "myattr", "test")
def test_comment_immutable_attrib(self):
c = self.etree.Comment()
self.assertEqual(0, len(c.attrib))
self.assertFalse(c.attrib.__contains__('nope'))
self.assertFalse('nope' in c.attrib)
self.assertFalse('nope' in c.attrib.keys())
self.assertFalse('nope' in c.attrib.values())
self.assertFalse(('nope', 'huhu') in c.attrib.items())
self.assertEqual([], list(c.attrib))
self.assertEqual([], list(c.attrib.keys()))
self.assertEqual([], list(c.attrib.items()))
self.assertEqual([], list(c.attrib.values()))
self.assertEqual([], list(c.attrib.iterkeys()))
self.assertEqual([], list(c.attrib.iteritems()))
self.assertEqual([], list(c.attrib.itervalues()))
self.assertEqual('HUHU', c.attrib.pop('nope', 'HUHU'))
self.assertRaises(KeyError, c.attrib.pop, 'nope')
self.assertRaises(KeyError, c.attrib.__getitem__, 'only')
self.assertRaises(KeyError, c.attrib.__getitem__, 'names')
self.assertRaises(KeyError, c.attrib.__getitem__, 'nope')
self.assertRaises(KeyError, c.attrib.__setitem__, 'nope', 'yep')
self.assertRaises(KeyError, c.attrib.__delitem__, 'nope')
# test passing 'None' to dump()
def test_dump_none(self):
self.assertRaises(TypeError, self.etree.dump, None)
def test_prefix(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a xmlns:foo="http://www.infrae.com/ns/1"><foo:b/></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
None,
a.prefix)
self.assertEqual(
'foo',
a[0].prefix)
def test_prefix_default_ns(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a xmlns="http://www.infrae.com/ns/1"><b/></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
None,
a.prefix)
self.assertEqual(
None,
a[0].prefix)
def test_getparent(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
None,
a.getparent())
self.assertEqual(
a,
b.getparent())
self.assertEqual(
b.getparent(),
c.getparent())
self.assertEqual(
b,
d.getparent())
def test_iterchildren(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root.iterchildren():
result.append(el.tag)
self.assertEqual(['one', 'two', 'three'], result)
def test_iterchildren_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root.iterchildren(reversed=True):
result.append(el.tag)
self.assertEqual(['three', 'two', 'one'], result)
def test_iterchildren_tag(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren(tag='two'):
result.append(el.text)
self.assertEqual(['Two', 'Bla'], result)
def test_iterchildren_tag_posarg(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren('two'):
result.append(el.text)
self.assertEqual(['Two', 'Bla'], result)
def test_iterchildren_tag_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren(reversed=True, tag='two'):
result.append(el.text)
self.assertEqual(['Bla', 'Two'], result)
def test_iterchildren_tag_multiple(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren(tag=['two', 'three']):
result.append(el.text)
self.assertEqual(['Two', 'Bla', None], result)
def test_iterchildren_tag_multiple_posarg(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren('two', 'three'):
result.append(el.text)
self.assertEqual(['Two', 'Bla', None], result)
def test_iterchildren_tag_multiple_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren(reversed=True, tag=['two', 'three']):
result.append(el.text)
self.assertEqual([None, 'Bla', 'Two'], result)
def test_iterancestors(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.iterancestors()))
self.assertEqual(
[a],
list(b.iterancestors()))
self.assertEqual(
[a],
list(c.iterancestors()))
self.assertEqual(
[b, a],
list(d.iterancestors()))
def test_iterancestors_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[a],
list(d.iterancestors('a')))
self.assertEqual(
[a],
list(d.iterancestors(tag='a')))
self.assertEqual(
[b, a],
list(d.iterancestors('*')))
self.assertEqual(
[b, a],
list(d.iterancestors(tag='*')))
def test_iterancestors_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[b, a],
list(d.iterancestors(tag=('a', 'b'))))
self.assertEqual(
[b, a],
list(d.iterancestors('a', 'b')))
self.assertEqual(
[],
list(d.iterancestors(tag=('w', 'x', 'y', 'z'))))
self.assertEqual(
[],
list(d.iterancestors('w', 'x', 'y', 'z')))
self.assertEqual(
[],
list(d.iterancestors(tag=('d', 'x'))))
self.assertEqual(
[],
list(d.iterancestors('d', 'x')))
self.assertEqual(
[b, a],
list(d.iterancestors(tag=('b', '*'))))
self.assertEqual(
[b, a],
list(d.iterancestors('b', '*')))
self.assertEqual(
[b],
list(d.iterancestors(tag=('b', 'c'))))
self.assertEqual(
[b],
list(d.iterancestors('b', 'c')))
def test_iterdescendants(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[b, d, c, e],
list(a.iterdescendants()))
self.assertEqual(
[],
list(d.iterdescendants()))
def test_iterdescendants_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[],
list(a.iterdescendants('a')))
self.assertEqual(
[],
list(a.iterdescendants(tag='a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a2],
list(a.iterdescendants('a')))
self.assertEqual(
[a2],
list(c.iterdescendants('a')))
self.assertEqual(
[a2],
list(c.iterdescendants(tag='a')))
def test_iterdescendants_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[b, e],
list(a.iterdescendants(tag=('a', 'b', 'e'))))
self.assertEqual(
[b, e],
list(a.iterdescendants('a', 'b', 'e')))
a2 = SubElement(e, 'a')
self.assertEqual(
[b, a2],
list(a.iterdescendants(tag=('a', 'b'))))
self.assertEqual(
[b, a2],
list(a.iterdescendants('a', 'b')))
self.assertEqual(
[],
list(c.iterdescendants(tag=('x', 'y', 'z'))))
self.assertEqual(
[],
list(c.iterdescendants('x', 'y', 'z')))
self.assertEqual(
[b, d, c, e, a2],
list(a.iterdescendants(tag=('x', 'y', 'z', '*'))))
self.assertEqual(
[b, d, c, e, a2],
list(a.iterdescendants('x', 'y', 'z', '*')))
def test_getroottree(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
a,
a.getroottree().getroot())
self.assertEqual(
a,
b.getroottree().getroot())
self.assertEqual(
a,
d.getroottree().getroot())
def test_getnext(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertEqual(
None,
a.getnext())
self.assertEqual(
c,
b.getnext())
self.assertEqual(
None,
c.getnext())
def test_getprevious(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
None,
a.getprevious())
self.assertEqual(
b,
c.getprevious())
self.assertEqual(
None,
b.getprevious())
def test_itersiblings(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.itersiblings()))
self.assertEqual(
[c],
list(b.itersiblings()))
self.assertEqual(
[],
list(c.itersiblings()))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True)))
self.assertEqual(
[],
list(b.itersiblings(preceding=True)))
def test_itersiblings_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.itersiblings(tag='XXX')))
self.assertEqual(
[c],
list(b.itersiblings(tag='c')))
self.assertEqual(
[c],
list(b.itersiblings(tag='*')))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True, tag='b')))
self.assertEqual(
[],
list(c.itersiblings(preceding=True, tag='c')))
def test_itersiblings_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(a, 'e')
self.assertEqual(
[],
list(a.itersiblings(tag=('XXX', 'YYY'))))
self.assertEqual(
[c, e],
list(b.itersiblings(tag=('c', 'd', 'e'))))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True, tag=('b', 'b', 'c', 'd'))))
self.assertEqual(
[c, b],
list(e.itersiblings(preceding=True, tag=('c', '*'))))
def test_parseid(self):
parseid = self.etree.parseid
XML = self.etree.XML
xml_text = _bytes('''
<!DOCTYPE document [
<!ELEMENT document (h1,p)*>
<!ELEMENT h1 (#PCDATA)>
<!ATTLIST h1 myid ID #REQUIRED>
<!ELEMENT p (#PCDATA)>
<!ATTLIST p someid ID #REQUIRED>
]>
<document>
<h1 myid="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p xml:id="xmlid">XML:ID paragraph.</p>
<p someid="warn1" class="warning">...</p>
</document>
''')
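# only attributes declared as type ID in the DTD (plus xml:id) are collected;
# the undeclared 'id' attribute on 'note1' is not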
tree, dic = parseid(BytesIO(xml_text))
root = tree.getroot()
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
expected = {
"chapter1" : root[0],
"xmlid" : root[3],
"warn1" : root[4]
}
self.assertTrue("chapter1" in dic)
self.assertTrue("warn1" in dic)
self.assertTrue("xmlid" in dic)
self._checkIDDict(dic, expected)
def test_XMLDTDID(self):
XMLDTDID = self.etree.XMLDTDID
XML = self.etree.XML
xml_text = _bytes('''
<!DOCTYPE document [
<!ELEMENT document (h1,p)*>
<!ELEMENT h1 (#PCDATA)>
<!ATTLIST h1 myid ID #REQUIRED>
<!ELEMENT p (#PCDATA)>
<!ATTLIST p someid ID #REQUIRED>
]>
<document>
<h1 myid="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p xml:id="xmlid">XML:ID paragraph.</p>
<p someid="warn1" class="warning">...</p>
</document>
''')
root, dic = XMLDTDID(xml_text)
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
expected = {
"chapter1" : root[0],
"xmlid" : root[3],
"warn1" : root[4]
}
self.assertTrue("chapter1" in dic)
self.assertTrue("warn1" in dic)
self.assertTrue("xmlid" in dic)
self._checkIDDict(dic, expected)
def test_XMLDTDID_empty(self):
XMLDTDID = self.etree.XMLDTDID
XML = self.etree.XML
xml_text = _bytes('''
<document>
<h1 myid="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p someid="warn1" class="warning">...</p>
</document>
''')
root, dic = XMLDTDID(xml_text)
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
expected = {}
self._checkIDDict(dic, expected)
def test_XMLDTDID_no_id_dict(self):
XMLDTDID = self.etree.XMLDTDID
XML = self.etree.XML
xml_text = _bytes('''
<!DOCTYPE document [
<!ELEMENT document (h1,p)*>
<!ELEMENT h1 (#PCDATA)>
<!ATTLIST h1 myid ID #REQUIRED>
<!ELEMENT p (#PCDATA)>
<!ATTLIST p someid ID #REQUIRED>
]>
<document>
<h1 myid="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p xml:id="xmlid">XML:ID paragraph.</p>
<p someid="warn1" class="warning">...</p>
</document>
''')
parser = etree.XMLParser(collect_ids=False)
root, dic = XMLDTDID(xml_text, parser=parser)
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
self.assertFalse(dic)
self._checkIDDict(dic, {})
def _checkIDDict(self, dic, expected):
self.assertEqual(len(dic),
len(expected))
self.assertEqual(sorted(dic.items()),
sorted(expected.items()))
if sys.version_info < (3,):
self.assertEqual(sorted(dic.iteritems()),
sorted(expected.iteritems()))
self.assertEqual(sorted(dic.keys()),
sorted(expected.keys()))
if sys.version_info < (3,):
self.assertEqual(sorted(dic.iterkeys()),
sorted(expected.iterkeys()))
if sys.version_info < (3,):
self.assertEqual(sorted(dic.values()),
sorted(expected.values()))
self.assertEqual(sorted(dic.itervalues()),
sorted(expected.itervalues()))
def test_register_namespace_xml(self):
self.assertRaises(ValueError, self.etree.register_namespace,
"XML", "http://www.w3.org/XML/1998/namespace")
self.assertRaises(ValueError, self.etree.register_namespace,
"xml", "http://www.w3.org/XML/2345")
self.etree.register_namespace("xml", "http://www.w3.org/XML/1998/namespace") # ok
def test_namespaces(self):
etree = self.etree
r = {'foo': 'http://ns.infrae.com/foo'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
'foo',
e.prefix)
self.assertEqual(
_bytes('<foo:bar xmlns:foo="http://ns.infrae.com/foo"></foo:bar>'),
self._writeElement(e))
def test_namespaces_default(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
None,
e.prefix)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e.tag)
self.assertEqual(
_bytes('<bar xmlns="http://ns.infrae.com/foo"></bar>'),
self._writeElement(e))
def test_namespaces_default_and_other(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo', 'p': 'http://test/'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(None, e.prefix)
self.assertEqual('{http://ns.infrae.com/foo}bar', e.tag)
self.assertEqual(
_bytes('<bar xmlns="http://ns.infrae.com/foo" xmlns:p="http://test/"></bar>'),
self._writeElement(e))
def test_namespaces_default_and_attr(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e.set('{http://ns.infrae.com/hoi}test', 'value')
self.assertEqual(
_bytes('<bar xmlns="http://ns.infrae.com/foo" xmlns:hoi="http://ns.infrae.com/hoi" hoi:test="value"></bar>'),
self._writeElement(e))
def test_attribute_keeps_namespace_prefix_on_merge(self):
etree = self.etree
root = etree.Element('{http://test/ns}root',
nsmap={None: 'http://test/ns'})
sub = etree.Element('{http://test/ns}sub',
nsmap={'test': 'http://test/ns'})
sub.attrib['{http://test/ns}attr'] = 'value'
self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
self.assertEqual(
_bytes('<test:sub xmlns:test="http://test/ns" test:attr="value"/>'),
etree.tostring(sub))
root.append(sub)
self.assertEqual(
_bytes('<root xmlns="http://test/ns">'
'<sub xmlns:test="http://test/ns" test:attr="value"/>'
'</root>'),
etree.tostring(root))
def test_attribute_keeps_namespace_prefix_on_merge_with_nons(self):
etree = self.etree
root = etree.Element('root')
sub = etree.Element('{http://test/ns}sub',
nsmap={'test': 'http://test/ns'})
sub.attrib['{http://test/ns}attr'] = 'value'
self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
self.assertEqual(
_bytes('<test:sub xmlns:test="http://test/ns" test:attr="value"/>'),
etree.tostring(sub))
root.append(sub)
self.assertEqual(
_bytes('<root>'
'<test:sub xmlns:test="http://test/ns" test:attr="value"/>'
'</root>'),
etree.tostring(root))
def test_attribute_gets_namespace_prefix_on_merge_with_nons(self):
etree = self.etree
root = etree.Element('root')
sub = etree.Element('{http://test/ns}sub',
nsmap={None: 'http://test/ns'})
sub.attrib['{http://test/ns}attr'] = 'value'
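# attributes never use the default (unprefixed) namespace, so a prefix ('ns0') is generated for the namespaced attribute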
self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
self.assertEqual(
_bytes('<sub xmlns="http://test/ns" '
'xmlns:ns0="http://test/ns" ns0:attr="value"/>'),
etree.tostring(sub))
root.append(sub)
self.assertEqual(
_bytes('<root>'
'<sub xmlns="http://test/ns"'
' xmlns:ns0="http://test/ns" ns0:attr="value"/>'
'</root>'),
etree.tostring(root))
def test_attribute_gets_namespace_prefix_on_merge(self):
etree = self.etree
root = etree.Element('{http://test/ns}root',
nsmap={'test': 'http://test/ns',
None: 'http://test/ns'})
sub = etree.Element('{http://test/ns}sub',
nsmap={None: 'http://test/ns'})
sub.attrib['{http://test/ns}attr'] = 'value'
self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
self.assertEqual(
_bytes('<sub xmlns="http://test/ns" '
'xmlns:ns0="http://test/ns" ns0:attr="value"/>'),
etree.tostring(sub))
root.append(sub)
self.assertEqual(
_bytes('<test:root xmlns:test="http://test/ns" xmlns="http://test/ns">'
'<test:sub test:attr="value"/>'
'</test:root>'),
etree.tostring(root))
def test_namespaces_elementtree(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}z', nsmap=r)
tree = etree.ElementTree(element=e)
etree.SubElement(e, '{http://ns.infrae.com/hoi}x')
self.assertEqual(
_bytes('<z xmlns="http://ns.infrae.com/foo" xmlns:hoi="http://ns.infrae.com/hoi"><hoi:x></hoi:x></z>'),
self._writeElement(e))
def test_namespaces_default_copy_element(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo'}
e1 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e2 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e1.append(e2)
self.assertEqual(
None,
e1.prefix)
self.assertEqual(
None,
e1[0].prefix)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e1.tag)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e1[0].tag)
def test_namespaces_copy_element(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/BAR'}
e1 = etree.Element('{http://ns.infrae.com/BAR}bar', nsmap=r)
e2 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e1.append(e2)
self.assertEqual(
None,
e1.prefix)
self.assertNotEqual(
None,
e2.prefix)
self.assertEqual(
'{http://ns.infrae.com/BAR}bar',
e1.tag)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e2.tag)
def test_namespaces_reuse_after_move(self):
ns_href = "http://a.b.c"
one = self.etree.fromstring(
_bytes('<foo><bar xmlns:ns="%s"><ns:baz/></bar></foo>' % ns_href))
baz = one[0][0]
two = self.etree.fromstring(
_bytes('<root xmlns:ns="%s"/>' % ns_href))
two.append(baz)
del one # make sure the source document is deallocated
self.assertEqual('{%s}baz' % ns_href, baz.tag)
self.assertEqual(
_bytes('<root xmlns:ns="%s"><ns:baz/></root>' % ns_href),
self.etree.tostring(two))
def test_namespace_cleanup(self):
xml = _bytes(
'<foo xmlns="F" xmlns:x="x">'
'<bar xmlns:ns="NS" xmlns:b="b" xmlns="B">'
'<ns:baz/>'
'</bar></foo>'
)
root = self.etree.fromstring(xml)
self.assertEqual(xml, self.etree.tostring(root))
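# cleanup_namespaces() drops namespace declarations that are unused in the subtree (here 'x' and 'b')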
self.etree.cleanup_namespaces(root)
self.assertEqual(
_bytes('<foo xmlns="F"><bar xmlns:ns="NS" xmlns="B"><ns:baz/></bar></foo>'),
self.etree.tostring(root))
def test_namespace_cleanup_attributes(self):
xml = _bytes(
'<foo xmlns="F" xmlns:x="X" xmlns:a="A">'
'<bar xmlns:ns="NS" xmlns:b="b" xmlns="B">'
'<ns:baz a:test="attr"/>'
'</bar></foo>'
)
root = self.etree.fromstring(xml)
self.assertEqual(xml, self.etree.tostring(root))
self.etree.cleanup_namespaces(root)
self.assertEqual(
_bytes('<foo xmlns="F" xmlns:a="A">'
'<bar xmlns:ns="NS" xmlns="B">'
'<ns:baz a:test="attr"/>'
'</bar></foo>'),
self.etree.tostring(root))
def test_namespace_cleanup_many(self):
xml = ('<n12:foo ' +
' '.join('xmlns:n{n}="NS{n}"'.format(n=i) for i in range(100)) +
'><n68:a/></n12:foo>').encode('utf8')
root = self.etree.fromstring(xml)
self.assertEqual(xml, self.etree.tostring(root))
self.etree.cleanup_namespaces(root)
self.assertEqual(
b'<n12:foo xmlns:n12="NS12" xmlns:n68="NS68"><n68:a/></n12:foo>',
self.etree.tostring(root))
def test_namespace_cleanup_deep(self):
xml = ('<root>' +
''.join('<a xmlns:n{n}="NS{n}">'.format(n=i) for i in range(100)) +
'<n64:x/>' + '</a>'*100 + '</root>').encode('utf8')
root = self.etree.fromstring(xml)
self.assertEqual(xml, self.etree.tostring(root))
self.etree.cleanup_namespaces(root)
self.assertEqual(
b'<root>' + b'<a>'*64 + b'<a xmlns:n64="NS64">' + b'<a>'*35 +
b'<n64:x/>' + b'</a>'*100 + b'</root>',
self.etree.tostring(root))
def test_namespace_cleanup_deep_to_top(self):
xml = ('<root>' +
''.join('<a xmlns:n{n}="NS{n}">'.format(n=i) for i in range(100)) +
'<n64:x xmlns:a="A" a:attr="X"/>' +
'</a>'*100 +
'</root>').encode('utf8')
root = self.etree.fromstring(xml)
self.assertEqual(xml, self.etree.tostring(root))
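# top_nsmap moves the given prefix mapping to the top element, so the deeply nested declaration becomes redundant and is removed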
self.etree.cleanup_namespaces(root, top_nsmap={'n64': 'NS64'})
self.assertEqual(
b'<root xmlns:n64="NS64">' + b'<a>'*100 +
b'<n64:x xmlns:a="A" a:attr="X"/>' + b'</a>'*100 + b'</root>',
self.etree.tostring(root))
def test_namespace_cleanup_keep_prefixes(self):
xml = ('<root xmlns:n64="NS64" xmlns:foo="FOO" xmlns:unused1="UNUSED" xmlns:no="NO">'
'<a xmlns:unused2="UNUSED"><n64:x xmlns:a="A" a:attr="X"/></a>'
'<foo>foo:bar</foo>'
'</root>').encode('utf8')
root = self.etree.fromstring(xml)
self.assertEqual(xml, self.etree.tostring(root))
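# keep_ns_prefixes protects the 'foo' declaration from removal even though no element or attribute uses it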
self.etree.cleanup_namespaces(root, keep_ns_prefixes=['foo'])
self.assertEqual(
b'<root xmlns:n64="NS64" xmlns:foo="FOO">'
b'<a><n64:x xmlns:a="A" a:attr="X"/></a>'
b'<foo>foo:bar</foo>'
b'</root>',
self.etree.tostring(root))
def test_namespace_cleanup_keep_prefixes_top(self):
xml = ('<root xmlns:n64="NS64" xmlns:unused1="UNUSED" xmlns:no="NO">'
'<sub xmlns:foo="FOO">'
'<a xmlns:unused2="UNUSED"><n64:x xmlns:a="A" a:attr="X"/></a>'
'<foo>foo:bar</foo>'
'</sub>'
'</root>').encode('utf8')
root = self.etree.fromstring(xml)
self.assertEqual(xml, self.etree.tostring(root))
self.etree.cleanup_namespaces(
root,
top_nsmap={'foo': 'FOO', 'unused1': 'UNUSED'},
keep_ns_prefixes=['foo'])
self.assertEqual(
b'<root xmlns:n64="NS64" xmlns:foo="FOO">'
b'<sub>'
b'<a><n64:x xmlns:a="A" a:attr="X"/></a>'
b'<foo>foo:bar</foo>'
b'</sub>'
b'</root>',
self.etree.tostring(root))
def test_element_nsmap(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
r,
e.nsmap)
def test_subelement_nsmap(self):
etree = self.etree
re = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=re)
rs = {None: 'http://ns.infrae.com/honk',
'top': 'http://ns.infrae.com/top'}
s = etree.SubElement(e, '{http://ns.infrae.com/honk}bar', nsmap=rs)
r = re.copy()
r.update(rs)
self.assertEqual(re, e.nsmap)
self.assertEqual(r, s.nsmap)
def test_html_prefix_nsmap(self):
etree = self.etree
el = etree.HTML('<hha:page-description>aa</hha:page-description>').find('.//page-description')
if etree.LIBXML_VERSION < (2, 9, 11):
self.assertEqual({'hha': None}, el.nsmap)
else:
self.assertEqual({}, el.nsmap)
def test_getchildren(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
_bytes('<a><b><d></d></b><c><e></e></c></a>'),
self.etree.tostring(a, method="c14n"))
self.assertEqual(
[b, c],
a.getchildren())
self.assertEqual(
[d],
b.getchildren())
self.assertEqual(
[],
d.getchildren())
def test_getiterator(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[a, b, d, c, e],
list(a.getiterator()))
self.assertEqual(
[d],
list(d.getiterator()))
def test_getiterator_empty(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[],
list(a.getiterator('none')))
self.assertEqual(
[],
list(e.getiterator('none')))
self.assertEqual(
[e],
list(e.getiterator()))
def test_getiterator_filter(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[a],
list(a.getiterator('a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a, a2],
list(a.getiterator('a')))
self.assertEqual(
[a2],
list(c.getiterator('a')))
def test_getiterator_filter_all(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[a, b, d, c, e],
list(a.getiterator('*')))
def test_getiterator_filter_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
comment_b = Comment("TEST-b")
b.append(comment_b)
self.assertEqual(
[comment_b],
list(a.getiterator(Comment)))
comment_a = Comment("TEST-a")
a.append(comment_a)
self.assertEqual(
[comment_b, comment_a],
list(a.getiterator(Comment)))
self.assertEqual(
[comment_b],
list(b.getiterator(Comment)))
def test_getiterator_filter_pi(self):
Element = self.etree.Element
PI = self.etree.ProcessingInstruction
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
pi_b = PI("TEST-b")
b.append(pi_b)
self.assertEqual(
[pi_b],
list(a.getiterator(PI)))
pi_a = PI("TEST-a")
a.append(pi_a)
self.assertEqual(
[pi_b, pi_a],
list(a.getiterator(PI)))
self.assertEqual(
[pi_b],
list(b.getiterator(PI)))
def test_getiterator_with_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = 'a'
b = SubElement(a, 'b')
b.text = 'b'
b.tail = 'b1'
c = SubElement(a, 'c')
c.text = 'c'
c.tail = 'c1'
d = SubElement(b, 'd')
d.text = 'd'
d.tail = 'd1'
e = SubElement(c, 'e')
e.text = 'e'
e.tail = 'e1'
self.assertEqual(
[a, b, d, c, e],
list(a.getiterator()))
#self.assertEqual(
# [d],
# list(d.getiterator()))
def test_getiterator_filter_with_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = 'a'
b = SubElement(a, 'b')
b.text = 'b'
b.tail = 'b1'
c = SubElement(a, 'c')
c.text = 'c'
c.tail = 'c1'
d = SubElement(b, 'd')
d.text = 'd'
d.tail = 'd1'
e = SubElement(c, 'e')
e.text = 'e'
e.tail = 'e1'
self.assertEqual(
[a],
list(a.getiterator('a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a, a2],
list(a.getiterator('a')))
self.assertEqual(
[a2],
list(e.getiterator('a')))
def test_getiterator_filter_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
f = SubElement(c, 'f')
self.assertEqual(
[a, b],
list(a.getiterator('a', 'b')))
self.assertEqual(
[],
list(a.getiterator('x', 'y')))
self.assertEqual(
[a, f],
list(a.getiterator('f', 'a')))
self.assertEqual(
[c, e, f],
list(c.getiterator('c', '*', 'a')))
self.assertEqual(
[],
list(a.getiterator( (), () )))
def test_getiterator_filter_multiple_tuple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
f = SubElement(c, 'f')
self.assertEqual(
[a, b],
list(a.getiterator( ('a', 'b') )))
self.assertEqual(
[],
list(a.getiterator( ('x', 'y') )))
self.assertEqual(
[a, f],
list(a.getiterator( ('f', 'a') )))
self.assertEqual(
[c, e, f],
list(c.getiterator( ('c', '*', 'a') )))
self.assertEqual(
[],
list(a.getiterator( () )))
def test_getiterator_filter_namespace(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('{a}a')
b = SubElement(a, '{a}b')
c = SubElement(a, '{a}c')
d = SubElement(b, '{b}d')
e = SubElement(c, '{a}e')
f = SubElement(c, '{b}f')
g = SubElement(c, 'g')
self.assertEqual(
[a],
list(a.getiterator('{a}a')))
self.assertEqual(
[],
list(a.getiterator('{b}a')))
self.assertEqual(
[],
list(a.getiterator('a')))
self.assertEqual(
[a,b,d,c,e,f,g],
list(a.getiterator('*')))
self.assertEqual(
[f],
list(c.getiterator('{b}*')))
self.assertEqual(
[d, f],
list(a.getiterator('{b}*')))
self.assertEqual(
[g],
list(a.getiterator('g')))
self.assertEqual(
[g],
list(a.getiterator('{}g')))
self.assertEqual(
[g],
list(a.getiterator('{}*')))
def test_getiterator_filter_local_name(self):
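        # '{*}tag' matches the local name in any namespace (including none),
        # and '{*}*' matches all elements regardless of namespace.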
Element = self.etree.Element
Comment = self.etree.Comment
SubElement = self.etree.SubElement
a = Element('{a}a')
b = SubElement(a, '{nsA}b')
c = SubElement(b, '{nsB}b')
d = SubElement(a, 'b')
e = SubElement(a, '{nsA}e')
f = SubElement(e, '{nsB}e')
g = SubElement(e, 'e')
a.append(Comment('test'))
self.assertEqual(
[b, c, d],
list(a.getiterator('{*}b')))
self.assertEqual(
[e, f, g],
list(a.getiterator('{*}e')))
self.assertEqual(
[a, b, c, d, e, f, g],
list(a.getiterator('{*}*')))
def test_getiterator_filter_entities(self):
Element = self.etree.Element
Entity = self.etree.Entity
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
entity_b = Entity("TEST-b")
b.append(entity_b)
self.assertEqual(
[entity_b],
list(a.getiterator(Entity)))
entity_a = Entity("TEST-a")
a.append(entity_a)
self.assertEqual(
[entity_b, entity_a],
list(a.getiterator(Entity)))
self.assertEqual(
[entity_b],
list(b.getiterator(Entity)))
def test_getiterator_filter_element(self):
Element = self.etree.Element
Comment = self.etree.Comment
PI = self.etree.PI
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.append(Comment("test"))
a.append(PI("pi", "content"))
c = SubElement(a, 'c')
self.assertEqual(
[a, b, c],
list(a.getiterator(Element)))
def test_getiterator_filter_all_comment_pi(self):
# ElementTree iterates over everything here
Element = self.etree.Element
Comment = self.etree.Comment
PI = self.etree.PI
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.append(Comment("test"))
a.append(PI("pi", "content"))
c = SubElement(a, 'c')
self.assertEqual(
[a, b, c],
list(a.getiterator('*')))
def test_elementtree_getiterator(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
ElementTree = self.etree.ElementTree
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
t = ElementTree(element=a)
self.assertEqual(
[a, b, d, c, e],
list(t.getiterator()))
def test_elementtree_getiterator_filter(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
ElementTree = self.etree.ElementTree
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
t = ElementTree(element=a)
self.assertEqual(
[a],
list(t.getiterator('a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a, a2],
list(t.getiterator('a')))
def test_elementtree_getelementpath(self):
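        # getelementpath() returns a structural path that find() resolves back to
        # the same element; same-tag siblings get positional predicates like d[1].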
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
c.text = d1.text = 'TEXT'
tree = etree.ElementTree(a)
self.assertEqual('.', tree.getelementpath(a))
self.assertEqual('c/d[1]', tree.getelementpath(d1))
self.assertEqual('c/d[2]', tree.getelementpath(d2))
self.assertEqual(d1, tree.find(tree.getelementpath(d1)))
self.assertEqual(d2, tree.find(tree.getelementpath(d2)))
tree = etree.ElementTree(c)
self.assertEqual('.', tree.getelementpath(c))
self.assertEqual('d[2]', tree.getelementpath(d2))
self.assertEqual(d2, tree.find(tree.getelementpath(d2)))
tree = etree.ElementTree(b) # not a parent of a/c/d1/d2
self.assertEqual('.', tree.getelementpath(b))
self.assertRaises(ValueError, tree.getelementpath, a)
self.assertRaises(ValueError, tree.getelementpath, c)
self.assertRaises(ValueError, tree.getelementpath, d2)
def test_elementtree_getelementpath_ns(self):
a = etree.Element("{http://ns1/}a")
b = etree.SubElement(a, "{http://ns1/}b")
c = etree.SubElement(a, "{http://ns1/}c")
d1 = etree.SubElement(c, "{http://ns1/}d")
d2 = etree.SubElement(c, "{http://ns2/}d")
d3 = etree.SubElement(c, "{http://ns1/}d")
tree = etree.ElementTree(a)
self.assertEqual('.', tree.getelementpath(a))
self.assertEqual('{http://ns1/}c/{http://ns1/}d[1]',
tree.getelementpath(d1))
self.assertEqual('{http://ns1/}c/{http://ns2/}d',
tree.getelementpath(d2))
self.assertEqual('{http://ns1/}c/{http://ns1/}d[2]',
tree.getelementpath(d3))
self.assertEqual(a, tree.find(tree.getelementpath(a)))
self.assertEqual(b, tree.find(tree.getelementpath(b)))
self.assertEqual(c, tree.find(tree.getelementpath(c)))
self.assertEqual(d1, tree.find(tree.getelementpath(d1)))
self.assertEqual(d2, tree.find(tree.getelementpath(d2)))
self.assertEqual(d3, tree.find(tree.getelementpath(d3)))
tree = etree.ElementTree(c)
self.assertEqual('{http://ns1/}d[1]', tree.getelementpath(d1))
self.assertEqual('{http://ns2/}d', tree.getelementpath(d2))
self.assertEqual('{http://ns1/}d[2]', tree.getelementpath(d3))
self.assertEqual(d1, tree.find(tree.getelementpath(d1)))
self.assertEqual(d2, tree.find(tree.getelementpath(d2)))
self.assertEqual(d3, tree.find(tree.getelementpath(d3)))
tree = etree.ElementTree(b) # not a parent of d1/d2
self.assertRaises(ValueError, tree.getelementpath, d1)
self.assertRaises(ValueError, tree.getelementpath, d2)
def test_elementtree_iter_qname(self):
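        # QName objects are accepted as iteration filters, equivalent to the
        # corresponding '{namespace}tag' strings.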
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(
_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>')))
self.assertEqual(
list(tree.iter(QName("b"))),
list(tree.iter("b")),
)
self.assertEqual(
list(tree.iter(QName("X", "b"))),
list(tree.iter("{X}b")),
)
self.assertEqual(
[e.tag for e in tree.iter(QName("X", "b"), QName("b"))],
['{X}b', 'b', '{X}b', 'b', 'b']
)
self.assertEqual(
list(tree.iter(QName("X", "b"), QName("b"))),
list(tree.iter("{X}b", "b"))
)
def test_elementtree_find_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(_bytes('<a><b><c/></b><b/><c><b/></c></a>')))
self.assertEqual(tree.find(QName("c")), tree.getroot()[2])
def test_elementtree_findall_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(_bytes('<a><b><c/></b><b/><c><b/></c></a>')))
self.assertEqual(len(list(tree.findall(QName("c")))), 1)
def test_elementtree_findall_ns_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(
_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>')))
self.assertEqual(len(list(tree.findall(QName("b")))), 2)
self.assertEqual(len(list(tree.findall(QName("X", "b")))), 1)
def test_findall_ns(self):
XML = self.etree.XML
root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>'))
self.assertEqual(len(root.findall(".//{X}b")), 2)
self.assertEqual(len(root.findall(".//{X}*")), 2)
self.assertEqual(len(root.findall(".//b")), 3)
def test_findall_different_nsmaps(self):
XML = self.etree.XML
root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><y:b/></a>'))
nsmap = {'xx': 'X'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//xx:*", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
nsmap = {'xx': 'Y'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 1)
self.assertEqual(len(root.findall(".//xx:*", namespaces=nsmap)), 1)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
def test_findall_empty_prefix(self):
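        # A default namespace mapped to None or '' applies to unprefixed path steps.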
XML = self.etree.XML
root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><y:b/></a>'))
nsmap = {'xx': 'X'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
nsmap = {'xx': 'X', None: 'Y'}
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 1)
nsmap = {'xx': 'X', '': 'Y'}
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 1)
def test_findall_syntax_error(self):
XML = self.etree.XML
root = XML(_bytes('<a><b><c/></b><b/><c><b/><b/></c><b/></a>'))
self.assertRaises(SyntaxError, root.findall, '')
self.assertRaises(SyntaxError, root.findall, '//') # absolute path on Element
self.assertRaises(SyntaxError, root.findall, './//')
def test_index(self):
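        # Element.index() (an lxml extension) accepts optional start/stop bounds,
        # including negative indices.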
etree = self.etree
e = etree.Element('foo')
for i in range(10):
etree.SubElement(e, 'a%s' % i)
for i in range(10):
self.assertEqual(
i,
e.index(e[i]))
self.assertEqual(
3, e.index(e[3], 3))
self.assertRaises(
ValueError, e.index, e[3], 4)
self.assertRaises(
ValueError, e.index, e[3], 0, 2)
self.assertRaises(
ValueError, e.index, e[8], 0, -3)
self.assertRaises(
ValueError, e.index, e[8], -5, -3)
self.assertEqual(
8, e.index(e[8], 0, -1))
self.assertEqual(
8, e.index(e[8], -12, -1))
self.assertEqual(
0, e.index(e[0], -12, -1))
def test_replace(self):
etree = self.etree
e = etree.Element('foo')
for i in range(10):
el = etree.SubElement(e, 'a%s' % i)
el.text = "text%d" % i
el.tail = "tail%d" % i
child0 = e[0]
child1 = e[1]
child2 = e[2]
e.replace(e[0], e[1])
self.assertEqual(
9, len(e))
self.assertEqual(
child1, e[0])
self.assertEqual(
child1.text, "text1")
self.assertEqual(
child1.tail, "tail1")
self.assertEqual(
child0.tail, "tail0")
self.assertEqual(
child2, e[1])
e.replace(e[-1], e[0])
self.assertEqual(
child1, e[-1])
self.assertEqual(
child1.text, "text1")
self.assertEqual(
child1.tail, "tail1")
self.assertEqual(
child2, e[0])
def test_replace_new(self):
etree = self.etree
e = etree.Element('foo')
for i in range(10):
etree.SubElement(e, 'a%s' % i)
new_element = etree.Element("test")
new_element.text = "TESTTEXT"
new_element.tail = "TESTTAIL"
child1 = e[1]
e.replace(e[0], new_element)
self.assertEqual(
new_element, e[0])
self.assertEqual(
"TESTTEXT",
e[0].text)
self.assertEqual(
"TESTTAIL",
e[0].tail)
self.assertEqual(
child1, e[1])
def test_setslice_all_reversed(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
e = Element('e')
f = Element('f')
g = Element('g')
a[:] = [e, f, g]
self.assertEqual(
[e, f, g],
list(a))
a[::-1] = [e, f, g]
self.assertEqual(
[g, f, e],
list(a))
def test_setslice_step(self):
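        # Extended slice assignment replaces every second child starting at index 1.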
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
a[1::2] = [x, y]
self.assertEqual(
[b, x, d, y],
list(a))
def test_setslice_step_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
a[1::-1] = [x, y]
self.assertEqual(
[y, x, d, e],
list(a))
def test_setslice_step_negative2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
a[::-2] = [x, y]
self.assertEqual(
[b, y, d, x],
list(a))
def test_setslice_step_overrun(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
try:
slice
except NameError:
print("slice() not found")
return
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
z = Element('z')
self.assertRaises(
ValueError,
operator.setitem, a, slice(1,None,2), [x, y, z])
self.assertEqual(
[b, c, d, e],
list(a))
def test_sourceline_XML(self):
XML = self.etree.XML
root = XML(_bytes('''<?xml version="1.0"?>
<root><test>
<bla/></test>
</root>
'''))
self.assertEqual(
[2, 2, 4],
[ el.sourceline for el in root.getiterator() ])
def test_large_sourceline_XML(self):
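        # libxml2 versions before 2.9 store line numbers in 16 bits and thus
        # cap sourceline at 65535.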
XML = self.etree.XML
root = XML(_bytes(
'<?xml version="1.0"?>\n'
'<root>' + '\n' * 65536 +
'<p>' + '\n' * 65536 + '</p>\n' +
'<br/>\n'
'</root>'))
if self.etree.LIBXML_VERSION >= (2, 9):
expected = [2, 131074, 131076]
else:
expected = [2, 65535, 65535]
self.assertEqual(expected, [el.sourceline for el in root.iter()])
def test_sourceline_parse(self):
parse = self.etree.parse
tree = parse(fileInTestDir('include/test_xinclude.xml'))
self.assertEqual(
[1, 2, 3],
[ el.sourceline for el in tree.getiterator() ])
def test_sourceline_iterparse_end(self):
iterparse = self.etree.iterparse
lines = [ el.sourceline for (event, el) in
iterparse(fileInTestDir('include/test_xinclude.xml')) ]
self.assertEqual(
[2, 3, 1],
lines)
def test_sourceline_iterparse_start(self):
iterparse = self.etree.iterparse
lines = [ el.sourceline for (event, el) in
iterparse(fileInTestDir('include/test_xinclude.xml'),
events=("start",)) ]
self.assertEqual(
[1, 2, 3],
lines)
def test_sourceline_element(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element("test")
self.assertEqual(None, el.sourceline)
child = SubElement(el, "test")
self.assertEqual(None, el.sourceline)
self.assertEqual(None, child.sourceline)
def test_XML_base_url_docinfo(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_XML_set_base_url_docinfo(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
docinfo.URL = "https://secret/url"
self.assertEqual(docinfo.URL, "https://secret/url")
def test_parse_stringio_base_url(self):
etree = self.etree
tree = etree.parse(BytesIO("<root/>"), base_url="http://no/such/url")
docinfo = tree.docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_parse_base_url_docinfo(self):
etree = self.etree
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
base_url="http://no/such/url")
docinfo = tree.docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_HTML_base_url_docinfo(self):
etree = self.etree
root = etree.HTML(_bytes("<html/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_docinfo_public(self):
etree = self.etree
xml_header = '<?xml version="1.0" encoding="ascii"?>'
pub_id = "-//W3C//DTD XHTML 1.0 Transitional//EN"
sys_id = "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"
doctype_string = '<!DOCTYPE html PUBLIC "%s" "%s">' % (pub_id, sys_id)
xml = _bytes(xml_header + doctype_string + '<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "ascii")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, pub_id)
self.assertEqual(docinfo.system_url, sys_id)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, doctype_string)
def test_docinfo_system(self):
etree = self.etree
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
sys_id = "some.dtd"
doctype_string = '<!DOCTYPE html SYSTEM "%s">' % sys_id
xml = _bytes(xml_header + doctype_string + '<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, sys_id)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, doctype_string)
def test_docinfo_empty(self):
etree = self.etree
xml = _bytes('<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, None)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, '')
def test_docinfo_name_only(self):
etree = self.etree
xml = _bytes('<!DOCTYPE root><root></root>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, None)
self.assertEqual(docinfo.root_name, 'root')
self.assertEqual(docinfo.doctype, '<!DOCTYPE root>')
def test_doctype_name_only_roundtrip(self):
etree = self.etree
xml = _bytes('<!DOCTYPE root>\n<root/>')
tree = etree.parse(BytesIO(xml))
self.assertEqual(xml, etree.tostring(tree))
def test_doctype_output_override(self):
etree = self.etree
pub_id = "-//W3C//DTD XHTML 1.0 Transitional//EN"
sys_id = "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"
doctype_string = _bytes('<!DOCTYPE html PUBLIC "%s" "%s">' % (pub_id, sys_id))
xml = _bytes('<!DOCTYPE root>\n<root/>')
tree = etree.parse(BytesIO(xml))
self.assertEqual(xml.replace(_bytes('<!DOCTYPE root>'), doctype_string),
etree.tostring(tree, doctype=doctype_string))
def test_xml_base(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'), None)
root.base = "https://secret/url"
self.assertEqual(root.base, "https://secret/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'),
"https://secret/url")
def test_xml_base_attribute(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'), None)
root.set('{http://www.w3.org/XML/1998/namespace}base',
"https://secret/url")
self.assertEqual(root.base, "https://secret/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'),
"https://secret/url")
def test_html_base(self):
etree = self.etree
root = etree.HTML(_bytes("<html><body></body></html>"),
base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
def test_html_base_tag(self):
etree = self.etree
root = etree.HTML(_bytes('<html><head><base href="http://no/such/url"></head></html>'))
self.assertEqual(root.base, "http://no/such/url")
def test_indent(self):
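        # indent() adds pretty-printing whitespace in place, mirroring the
        # ElementTree API introduced in Python 3.9.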
ET = self.etree
elem = ET.XML("<root></root>")
ET.indent(elem)
self.assertEqual(ET.tostring(elem), b'<root/>')
elem = ET.XML("<html><body>text</body></html>")
ET.indent(elem)
self.assertEqual(ET.tostring(elem), b'<html>\n <body>text</body>\n</html>')
elem = ET.XML("<html> <body>text</body> </html>")
ET.indent(elem)
self.assertEqual(ET.tostring(elem), b'<html>\n <body>text</body>\n</html>')
elem = ET.XML("<html> <body>text</body> </html>")
ET.indent(elem)
self.assertEqual(ET.tostring(elem), b'<html>\n <body>text</body>\n</html>')
elem = ET.XML("<html><body>text</body>tail</html>")
ET.indent(elem)
self.assertEqual(ET.tostring(elem), b'<html>\n <body>text</body>tail</html>')
elem = ET.XML("<html><body><p>par</p>\n<p>text</p>\t<p><br/></p></body></html>")
ET.indent(elem)
self.assertEqual(
ET.tostring(elem),
b'<html>\n'
b' <body>\n'
b' <p>par</p>\n'
b' <p>text</p>\n'
b' <p>\n'
b' <br/>\n'
b' </p>\n'
b' </body>\n'
b'</html>'
)
elem = ET.XML("<html><body><p>pre<br/>post</p><p>text</p></body></html>")
ET.indent(elem)
self.assertEqual(
ET.tostring(elem),
b'<html>\n'
b' <body>\n'
b' <p>pre<br/>post</p>\n'
b' <p>text</p>\n'
b' </body>\n'
b'</html>'
)
def test_indent_space(self):
ET = self.etree
elem = ET.XML("<html><body><p>pre<br/>post</p><p>text</p></body></html>")
ET.indent(elem, space='\t')
self.assertEqual(
ET.tostring(elem),
b'<html>\n'
b'\t<body>\n'
b'\t\t<p>pre<br/>post</p>\n'
b'\t\t<p>text</p>\n'
b'\t</body>\n'
b'</html>'
)
elem = ET.XML("<html><body><p>pre<br/>post</p><p>text</p></body></html>")
ET.indent(elem, space='')
self.assertEqual(
ET.tostring(elem),
b'<html>\n'
b'<body>\n'
b'<p>pre<br/>post</p>\n'
b'<p>text</p>\n'
b'</body>\n'
b'</html>'
)
def test_indent_space_caching(self):
ET = self.etree
elem = ET.XML("<html><body><p>par</p><p>text</p><p><br/></p><p /></body></html>")
ET.indent(elem)
self.assertEqual(
{el.tail for el in elem.iter()},
{None, "\n", "\n ", "\n "}
)
self.assertEqual(
{el.text for el in elem.iter()},
{None, "\n ", "\n ", "\n ", "par", "text"}
)
# NOTE: lxml does not reuse Python text strings across elements.
#self.assertEqual(
# len({el.tail for el in elem.iter()}),
# len({id(el.tail) for el in elem.iter()}),
#)
def test_indent_level(self):
ET = self.etree
elem = ET.XML("<html><body><p>pre<br/>post</p><p>text</p></body></html>")
        with self.assertRaises(ValueError):
            ET.indent(elem, level=-1)
self.assertEqual(
ET.tostring(elem),
b"<html><body><p>pre<br/>post</p><p>text</p></body></html>"
)
ET.indent(elem, level=2)
self.assertEqual(
ET.tostring(elem),
b'<html>\n'
b' <body>\n'
b' <p>pre<br/>post</p>\n'
b' <p>text</p>\n'
b' </body>\n'
b' </html>'
)
elem = ET.XML("<html><body><p>pre<br/>post</p><p>text</p></body></html>")
ET.indent(elem, level=1, space=' ')
self.assertEqual(
ET.tostring(elem),
b'<html>\n'
b' <body>\n'
b' <p>pre<br/>post</p>\n'
b' <p>text</p>\n'
b' </body>\n'
b' </html>'
)
def test_parse_fileobject_unicode(self):
# parse from a file object that returns unicode strings
f = LargeFileLikeUnicode()
tree = self.etree.parse(f)
root = tree.getroot()
self.assertTrue(root.tag.endswith('root'))
def test_dtd_io(self):
# check that DTDs that go in also go back out
xml = _bytes('''\
<!DOCTYPE test SYSTEM "test.dtd" [
<!ENTITY entity "tasty">
<!ELEMENT test (a)>
<!ELEMENT a (#PCDATA)>
]>
<test><a>test-test</a></test>\
''')
tree = self.etree.parse(BytesIO(xml))
self.assertEqual(self.etree.tostring(tree).replace(_bytes(" "), _bytes("")),
xml.replace(_bytes(" "), _bytes("")))
def test_byte_zero(self):
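        # NUL characters are rejected in text, tail and tag values.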
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text", 'ha\0ho')
self.assertRaises(ValueError, setattr, a, "tail", 'ha\0ho')
self.assertRaises(ValueError, Element, 'ha\0ho')
def test_unicode_byte_zero(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\0ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\0ho'))
self.assertRaises(ValueError, Element,
_str('ha\0ho'))
def test_byte_invalid(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text", 'ha\x07ho')
self.assertRaises(ValueError, setattr, a, "text", 'ha\x02ho')
self.assertRaises(ValueError, setattr, a, "tail", 'ha\x07ho')
self.assertRaises(ValueError, setattr, a, "tail", 'ha\x02ho')
self.assertRaises(ValueError, Element, 'ha\x07ho')
self.assertRaises(ValueError, Element, 'ha\x02ho')
def test_unicode_byte_invalid(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\x07ho'))
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\x02ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\x07ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\x02ho'))
self.assertRaises(ValueError, Element,
_str('ha\x07ho'))
self.assertRaises(ValueError, Element,
_str('ha\x02ho'))
def test_unicode_byte_invalid_sequence(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\u1234\x02ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\u1234\x02ho'))
self.assertRaises(ValueError, Element,
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, Element,
_str('ha\u1234\x02ho'))
def test_encoding_tostring_utf16(self):
# ElementTree fails to serialize this
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tostring(a, encoding='UTF-16')
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(result))
def test_tostring_none(self):
        # the stdlib ElementTree raises an AssertionError here, lxml raises a TypeError
tostring = self.etree.tostring
self.assertRaises(TypeError, self.etree.tostring, None)
def test_tostring_pretty(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tostring(a)
self.assertEqual(result, _bytes("<a><b/><c/></a>"))
result = tostring(a, pretty_print=False)
self.assertEqual(result, _bytes("<a><b/><c/></a>"))
result = tostring(a, pretty_print=True)
self.assertEqual(result, _bytes("<a>\n <b/>\n <c/>\n</a>\n"))
def test_tostring_with_tail(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.tail = "aTAIL"
b = SubElement(a, 'b')
b.tail = "bTAIL"
c = SubElement(a, 'c')
result = tostring(a)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>aTAIL"))
result = tostring(a, with_tail=False)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>"))
result = tostring(a, with_tail=True)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>aTAIL"))
def test_tostring_method_html_with_tail(self):
tostring = self.etree.tostring
html = self.etree.fromstring(
'<html><body>'
'<div><p>Some text<i>\r\n</i></p></div>\r\n'
'</body></html>',
parser=self.etree.HTMLParser())
self.assertEqual(html.tag, 'html')
div = html.find('.//div')
self.assertEqual(div.tail, '\r\n')
result = tostring(div, method='html')
self.assertEqual(
result,
_bytes("<div><p>Some text<i>\r\n</i></p></div>\r\n"))
result = tostring(div, method='html', with_tail=True)
self.assertEqual(
result,
_bytes("<div><p>Some text<i>\r\n</i></p></div>\r\n"))
result = tostring(div, method='html', with_tail=False)
self.assertEqual(
result,
_bytes("<div><p>Some text<i>\r\n</i></p></div>"))
def test_standalone(self):
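        # docinfo.standalone reflects the standalone flag of the XML declaration
        # (None when it is not specified).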
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
Element = self.etree.Element
tree = Element("root").getroottree()
self.assertEqual(None, tree.docinfo.standalone)
tree = XML(_bytes("<root/>")).getroottree()
self.assertEqual(None, tree.docinfo.standalone)
tree = XML(_bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"
)).getroottree()
self.assertEqual(True, tree.docinfo.standalone)
tree = XML(_bytes(
"<?xml version='1.0' encoding='ASCII' standalone='no'?>\n<root/>"
)).getroottree()
self.assertEqual(False, tree.docinfo.standalone)
def test_tostring_standalone(self):
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
root = XML(_bytes("<root/>"))
tree = ElementTree(root)
self.assertEqual(None, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII")
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII'?>\n<root/>"))
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=True)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"))
tree = ElementTree(XML(result))
self.assertEqual(True, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=False)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='no'?>\n<root/>"))
tree = ElementTree(XML(result))
self.assertEqual(False, tree.docinfo.standalone)
def test_tostring_standalone_in_out(self):
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
root = XML(_bytes(
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n<root/>"))
tree = ElementTree(root)
self.assertEqual(True, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII")
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII'?>\n<root/>"))
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=True)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"))
def test_tostring_method_text_encoding(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = "A"
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = _str("Søk på nettet")
c = SubElement(a, 'c')
c.text = "C"
result = tostring(a, method="text", encoding="UTF-16")
self.assertEqual(_str('ABSøk på nettetCtail').encode("UTF-16"),
result)
def test_tostring_method_text_unicode(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = _str('Søk på nettetA')
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = _str('Søk på nettetB')
c = SubElement(a, 'c')
c.text = "C"
self.assertRaises(UnicodeEncodeError,
tostring, a, method="text")
self.assertEqual(
_str('Søk på nettetABSøk på nettetBCtail').encode('utf-8'),
tostring(a, encoding="UTF-8", method="text"))
def test_tounicode(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertTrue(isinstance(tounicode(a), _unicode))
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(tounicode(a)))
def test_tounicode_element(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
self.assertTrue(isinstance(tounicode(b), _unicode))
self.assertTrue(isinstance(tounicode(c), _unicode))
self.assertEqual(_bytes('<b></b>'),
canonicalize(tounicode(b)))
self.assertEqual(_bytes('<c><d></d></c>'),
canonicalize(tounicode(c)))
def test_tounicode_none(self):
tounicode = self.etree.tounicode
self.assertRaises(TypeError, self.etree.tounicode, None)
def test_tounicode_element_tail(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
b.tail = 'Foo'
self.assertTrue(isinstance(tounicode(b), _unicode))
self.assertTrue(tounicode(b) == '<b/>Foo' or
tounicode(b) == '<b />Foo')
def test_tounicode_pretty(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tounicode(a)
self.assertEqual(result, "<a><b/><c/></a>")
result = tounicode(a, pretty_print=False)
self.assertEqual(result, "<a><b/><c/></a>")
result = tounicode(a, pretty_print=True)
self.assertEqual(result, "<a>\n <b/>\n <c/>\n</a>\n")
def test_tostring_unicode(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertTrue(isinstance(tostring(a, encoding=_unicode), _unicode))
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(tostring(a, encoding=_unicode)))
def test_tostring_unicode_element(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
self.assertTrue(isinstance(tostring(b, encoding=_unicode), _unicode))
self.assertTrue(isinstance(tostring(c, encoding=_unicode), _unicode))
self.assertEqual(_bytes('<b></b>'),
canonicalize(tostring(b, encoding=_unicode)))
self.assertEqual(_bytes('<c><d></d></c>'),
canonicalize(tostring(c, encoding=_unicode)))
def test_tostring_unicode_none(self):
tostring = self.etree.tostring
self.assertRaises(TypeError, self.etree.tostring,
None, encoding=_unicode)
def test_tostring_unicode_element_tail(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
b.tail = 'Foo'
self.assertTrue(isinstance(tostring(b, encoding=_unicode), _unicode))
self.assertTrue(tostring(b, encoding=_unicode) == '<b/>Foo' or
tostring(b, encoding=_unicode) == '<b />Foo')
def test_tostring_unicode_pretty(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tostring(a, encoding=_unicode)
self.assertEqual(result, "<a><b/><c/></a>")
result = tostring(a, encoding=_unicode, pretty_print=False)
self.assertEqual(result, "<a><b/><c/></a>")
result = tostring(a, encoding=_unicode, pretty_print=True)
self.assertEqual(result, "<a>\n <b/>\n <c/>\n</a>\n")
def test_pypy_proxy_collect(self):
root = etree.Element('parent')
etree.SubElement(root, 'child')
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, 'child')
# in PyPy, GC used to kill the Python proxy instance without cleanup
gc.collect()
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, 'child')
def test_element_refcycle(self):
class SubEl(etree.ElementBase):
pass
el1 = SubEl()
el2 = SubEl()
self.assertEqual('SubEl', el1.tag)
self.assertEqual('SubEl', el2.tag)
el1.other = el2
el2.other = el1
del el1, el2
gc.collect()
# not really testing anything here, but it shouldn't crash
def test_proxy_collect_siblings(self):
root = etree.Element('parent')
c1 = etree.SubElement(root, 'child1')
c2 = etree.SubElement(root, 'child2')
root.remove(c1)
root.remove(c2)
c1.addnext(c2)
del c1
# trigger deallocation attempt of c1
c2.getprevious()
# make sure it wasn't deallocated
self.assertEqual('child1', c2.getprevious().tag)
def test_proxy_collect_siblings_text(self):
root = etree.Element('parent')
c1 = etree.SubElement(root, 'child1')
c2 = etree.SubElement(root, 'child2')
root.remove(c1)
root.remove(c2)
c1.addnext(c2)
c1.tail = 'abc'
c2.tail = 'xyz'
del c1
# trigger deallocation attempt of c1
c2.getprevious()
# make sure it wasn't deallocated
self.assertEqual('child1', c2.getprevious().tag)
self.assertEqual('abc', c2.getprevious().tail)
def test_parse_source_pathlike(self):
etree = self.etree
tounicode = self.etree.tounicode
tree = etree.parse(SimpleFSPath(fileInTestDir('test.xml')))
self.assertEqual(_bytes('<a><b></b></a>'),
canonicalize(tounicode(tree)))
def test_iterparse_source_pathlike(self):
iterparse = self.etree.iterparse
events = list(iterparse(SimpleFSPath(fileInTestDir('test.xml'))))
self.assertEqual(2, len(events))
# helper methods
def _writeElement(self, element, encoding='us-ascii', compression=0):
"""Write out element for comparison.
"""
ElementTree = self.etree.ElementTree
f = BytesIO()
tree = ElementTree(element=element)
tree.write(f, encoding=encoding, compression=compression)
data = f.getvalue()
if compression:
data = zlib.decompress(data)
return canonicalize(data)
class _XIncludeTestCase(HelperTestCase):
def test_xinclude_text(self):
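        # parse="text" includes the referenced file as plain character data
        # instead of parsed XML.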
filename = fileInTestDir('test_broken.xml')
root = etree.XML(_bytes('''\
<doc xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="%s" parse="text"/>
</doc>
''' % path2url(filename)))
old_text = root.text
content = read_file(filename)
old_tail = root[0].tail
self.include( etree.ElementTree(root) )
self.assertEqual(old_text + content + old_tail,
root.text)
def test_xinclude(self):
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'))
self.assertNotEqual(
'a',
tree.getroot()[1].tag)
# process xincludes
self.include( tree )
# check whether we find it replaced with included data
self.assertEqual(
'a',
tree.getroot()[1].tag)
def test_xinclude_resolver(self):
class res(etree.Resolver):
include_text = read_file(fileInTestDir('test.xml'))
called = {}
def resolve(self, url, id, context):
if url.endswith(".dtd"):
self.called["dtd"] = True
return self.resolve_filename(
fileInTestDir('test.dtd'), context)
elif url.endswith("test_xinclude.xml"):
self.called["input"] = True
return None # delegate to default resolver
else:
self.called["include"] = True
return self.resolve_string(self.include_text, context)
res_instance = res()
parser = etree.XMLParser(load_dtd = True)
parser.resolvers.add(res_instance)
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
parser = parser)
self.include(tree)
called = list(res_instance.called.items())
called.sort()
self.assertEqual(
[("dtd", True), ("include", True), ("input", True)],
called)
def test_xinclude_resolver_recursive(self):
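        # Each resolved document contains another xi:include, so the resolver is
        # called repeatedly until it returns <DONE/> for test5.xml.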
data = textwrap.dedent('''
<doc xmlns:xi="http://www.w3.org/2001/XInclude">
<foo/>
<xi:include href="./test.xml" />
</doc>
''')
class Resolver(etree.Resolver):
called = {}
def resolve(self, url, id, context):
if url.endswith("test_xinclude.xml"):
assert not self.called.get("input")
self.called["input"] = True
return None # delegate to default resolver
elif url.endswith('/test5.xml'):
assert not self.called.get("DONE")
self.called["DONE"] = True
return self.resolve_string('<DONE/>', context)
else:
_, filename = url.rsplit('/', 1)
assert not self.called.get(filename)
self.called[filename] = True
next_data = data.replace(
'test.xml', 'test%d.xml' % len(self.called))
return self.resolve_string(next_data, context)
res_instance = Resolver()
parser = etree.XMLParser(load_dtd=True)
parser.resolvers.add(res_instance)
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
parser=parser)
self.include(tree)
called = list(res_instance.called.items())
called.sort()
self.assertEqual(
[("DONE", True), ("input", True), ("test.xml", True),
("test2.xml", True), ("test3.xml", True), ("test4.xml", True)],
called)
class ETreeXIncludeTestCase(_XIncludeTestCase):
def include(self, tree):
tree.xinclude()
class ElementIncludeTestCase(_XIncludeTestCase):
from lxml import ElementInclude
def include(self, tree, loader=None, max_depth=None):
self.ElementInclude.include(tree.getroot(), loader=loader, max_depth=max_depth)
XINCLUDE = {}
XINCLUDE["Recursive1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source code of Recursive2.xml:</p>
<xi:include href="Recursive2.xml"/>
</document>
"""
XINCLUDE["Recursive2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source code of Recursive3.xml:</p>
<xi:include href="Recursive3.xml"/>
</document>
"""
XINCLUDE["Recursive3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source code of Recursive1.xml:</p>
<xi:include href="Recursive1.xml"/>
</document>
"""
XINCLUDE["NonRecursive1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is multiple times the source code of NonRecursive3.xml:</p>
<xi:include href="NonRecursive3.xml"/>
<xi:include href="NonRecursive3.xml"/>
<p>The following is multiple times the source code of Leaf.xml:</p>
<xi:include href="Leaf.xml"/>
<xi:include href="Leaf.xml"/>
<xi:include href="Leaf.xml"/>
<p>One more time the source code of NonRecursive3.xml:</p>
<xi:include href="NonRecursive3.xml"/>
</document>
"""
XINCLUDE["NonRecursive2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is multiple times the source code of NonRecursive3.xml:</p>
<xi:include href="NonRecursive3.xml"/>
<xi:include href="NonRecursive3.xml"/>
</document>
"""
XINCLUDE["NonRecursive3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is multiple times the source code of Leaf.xml:</p>
<xi:include href="Leaf.xml"/>
<xi:include href="Leaf.xml"/>
</document>
"""
XINCLUDE["Leaf.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>No further includes</p>
</document>
"""
def xinclude_loader(self, href, parse="xml", encoding=None):
try:
data = textwrap.dedent(self.XINCLUDE[href])
except KeyError:
raise OSError("resource not found")
if parse == "xml":
data = etree.fromstring(data)
return data
def test_xinclude_failures(self):
# Test infinitely recursive includes.
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.FatalIncludeError) as cm:
self.include(document, self.xinclude_loader)
self.assertEqual(str(cm.exception),
"recursive include of 'Recursive2.xml' detected")
# Test 'max_depth' limitation.
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.FatalIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=None)
self.assertEqual(str(cm.exception),
"recursive include of 'Recursive2.xml' detected")
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.LimitedRecursiveIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=0)
self.assertEqual(str(cm.exception),
"maximum xinclude depth reached when including file Recursive2.xml")
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.LimitedRecursiveIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=1)
self.assertEqual(str(cm.exception),
"maximum xinclude depth reached when including file Recursive3.xml")
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.LimitedRecursiveIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=2)
self.assertEqual(str(cm.exception),
"maximum xinclude depth reached when including file Recursive1.xml")
document = self.xinclude_loader("Recursive1.xml").getroottree()
with self.assertRaises(self.ElementInclude.FatalIncludeError) as cm:
self.include(document, self.xinclude_loader, max_depth=3)
self.assertEqual(str(cm.exception),
"recursive include of 'Recursive2.xml' detected")
def test_multiple_include_of_same_file(self):
        # Test that including the same file multiple times at the same level
        # is not detected as a recursive include.
document = self.xinclude_loader("NonRecursive3.xml").getroottree()
self.include(document, self.xinclude_loader)
# same but for more than one level
document = self.xinclude_loader("NonRecursive1.xml").getroottree()
self.include(document, self.xinclude_loader)
# same but no Leaf.xml in top-level file
document = self.xinclude_loader("NonRecursive2.xml").getroottree()
self.include(document, self.xinclude_loader)
class ETreeC14NTestCase(HelperTestCase):
def test_c14n(self):
tree = self.parse(_bytes('<a><b/></a>'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write_c14n(f, compression=9)
with gzip.GzipFile(fileobj=BytesIO(f.getvalue())) as gzfile:
s = gzfile.read()
self.assertEqual(_bytes('<a>'+'<b></b>'*200+'</a>'),
s)
def test_c14n_file(self):
tree = self.parse(_bytes('<a><b/></a>'))
with tmpfile() as filename:
tree.write_c14n(filename)
data = read_file(filename, 'rb')
self.assertEqual(_bytes('<a><b></b></a>'),
data)
def test_c14n_file_pathlike(self):
tree = self.parse(_bytes('<a><b/></a>'))
with tmpfile() as filename:
tree.write_c14n(SimpleFSPath(filename))
data = read_file(filename, 'rb')
self.assertEqual(_bytes('<a><b></b></a>'),
data)
def test_c14n_file_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
with tmpfile() as filename:
tree.write_c14n(filename, compression=9)
with gzip.open(filename, 'rb') as f:
data = f.read()
self.assertEqual(_bytes('<a>'+'<b></b>'*200+'</a>'),
data)
def test_c14n_file_gzip_pathlike(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
with tmpfile() as filename:
tree.write_c14n(SimpleFSPath(filename), compression=9)
with gzip.open(filename, 'rb') as f:
data = f.read()
self.assertEqual(_bytes('<a>'+'<b></b>'*200+'</a>'),
data)
def test_c14n2_file_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
with tmpfile() as filename:
tree.write(filename, method='c14n2', compression=9)
with gzip.open(filename, 'rb') as f:
data = f.read()
self.assertEqual(_bytes('<a>'+'<b></b>'*200+'</a>'),
data)
def test_c14n2_with_text(self):
tree = self.parse(
b'<?xml version="1.0"?> <a> abc \n <b> btext </b> btail <c/> ctail </a> ')
f = BytesIO()
tree.write(f, method='c14n2')
s = f.getvalue()
self.assertEqual(b'<a> abc \n <b> btext </b> btail <c></c> ctail </a>',
s)
f = BytesIO()
tree.write(f, method='c14n2', strip_text=True)
s = f.getvalue()
self.assertEqual(b'<a>abc<b>btext</b>btail<c></c>ctail</a>',
s)
def test_c14n_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
f = BytesIO()
tree.write_c14n(f, with_comments=True)
s = f.getvalue()
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
f = BytesIO()
tree.write_c14n(f, with_comments=False)
s = f.getvalue()
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n2_with_comments(self):
tree = self.parse(b'<!--hi--> <a> <!-- ho --> <b/> </a> <!-- hu -->')
self.assertEqual(
b'<!--hi-->\n<a> <!-- ho --> <b></b> </a>\n<!-- hu -->',
etree.tostring(tree, method='c14n2'))
self.assertEqual(
b'<!--hi-->\n<a> <!-- ho --> <b></b> </a>\n<!-- hu -->',
etree.tostring(tree, method='c14n2', with_comments=True))
self.assertEqual(
b'<a> <b></b> </a>',
etree.tostring(tree, method='c14n2', with_comments=False))
def test_c14n2_with_comments_strip_text(self):
tree = self.parse(b'<!--hi--> <a> <!-- ho --> <b/> </a> <!-- hu -->')
self.assertEqual(
b'<!--hi-->\n<a><!-- ho --><b></b></a>\n<!-- hu -->',
etree.tostring(tree, method='c14n2', with_comments=True, strip_text=True))
self.assertEqual(
b'<a><b></b></a>',
etree.tostring(tree, method='c14n2', with_comments=False, strip_text=True))
def test_c14n_tostring_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
s = etree.tostring(tree, method='c14n')
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
s = etree.tostring(tree, method='c14n', with_comments=True)
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
s = etree.tostring(tree, method='c14n', with_comments=False)
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n2_tostring_with_comments(self):
tree = self.parse(b'<!--hi--><a><!--ho--><b/></a><!--hu-->')
s = etree.tostring(tree, method='c14n2')
self.assertEqual(b'<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->',
s)
s = etree.tostring(tree, method='c14n2', with_comments=True)
self.assertEqual(b'<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->',
s)
s = etree.tostring(tree, method='c14n2', with_comments=False)
self.assertEqual(b'<a><b></b></a>',
s)
def test_c14n_element_tostring_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
s = etree.tostring(tree.getroot(), method='c14n')
self.assertEqual(_bytes('<a><!--ho--><b></b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', with_comments=True)
self.assertEqual(_bytes('<a><!--ho--><b></b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', with_comments=False)
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_exclusive(self):
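        # Exclusive C14N omits namespace declarations that are not visibly used,
        # unless they are listed in inclusive_ns_prefixes.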
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=False)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=True)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=True, inclusive_ns_prefixes=['z'])
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
def test_c14n_tostring_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree, method='c14n')
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=False)
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=True)
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=True, inclusive_ns_prefixes=['y'])
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
def test_c14n_element_tostring_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree.getroot(), method='c14n')
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', exclusive=False)
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', exclusive=True)
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=False)
self.assertEqual(_bytes('<z:b xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"></z:b>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=True)
self.assertEqual(_bytes('<z:b xmlns:z="http://cde"></z:b>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=True, inclusive_ns_prefixes=['y'])
self.assertEqual(_bytes('<z:b xmlns:y="http://bcd" xmlns:z="http://cde"></z:b>'),
s)
def test_c14n_tostring_inclusive_ns_prefixes(self):
""" Regression test to fix memory allocation issues (use 3+ inclusive NS spaces)"""
tree = self.parse(_bytes(
'<a xmlns:x="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree, method='c14n', exclusive=True, inclusive_ns_prefixes=['x', 'y', 'z'])
self.assertEqual(_bytes('<a xmlns:x="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
def test_python3_problem_bytesio_iterparse(self):
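        # Serialises elements with method='c14n2' from within an iterparse loop;
        # per the test name, this guards against a Python 3 specific regression.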
content = BytesIO('''<?xml version="1.0" encoding="utf-8"?> <some_ns_id:some_head_elem xmlns:some_ns_id="http://www.example.com" xmlns:xhtml="http://www.w3.org/1999/xhtml"><xhtml:div></xhtml:div></some_ns_id:some_head_elem>'''.encode('utf-8'))
def handle_div_end(event, element):
if event == 'end' and element.tag.lower().startswith("{http://www.w3.org/1999/xhtml}div"):
# for ns_id, ns_uri in element.nsmap.items():
# print(type(ns_id), type(ns_uri), ns_id, '=', ns_uri)
etree.tostring(element, method="c14n2")
for event, element in etree.iterparse(
source=content,
events=('start', 'end')
):
handle_div_end(event, element)
def test_python3_problem_filebased_iterparse(self):
with open('test.xml', 'w+b') as f:
f.write('''<?xml version="1.0" encoding="utf-8"?> <some_ns_id:some_head_elem xmlns:some_ns_id="http://www.example.com" xmlns:xhtml="http://www.w3.org/1999/xhtml"><xhtml:div></xhtml:div></some_ns_id:some_head_elem>'''.encode('utf-8'))
def handle_div_end(event, element):
if event == 'end' and element.tag.lower() == "{http://www.w3.org/1999/xhtml}div":
# for ns_id, ns_uri in element.nsmap.items():
# print(type(ns_id), type(ns_uri), ns_id, '=', ns_uri)
etree.tostring(element, method="c14n2")
for event, element in etree.iterparse(
source='test.xml',
events=('start', 'end')
):
handle_div_end(event, element)
def test_python3_problem_filebased_parse(self):
with open('test.xml', 'w+b') as f:
f.write('''<?xml version="1.0" encoding="utf-8"?> <some_ns_id:some_head_elem xmlns:some_ns_id="http://www.example.com" xmlns:xhtml="http://www.w3.org/1999/xhtml"><xhtml:div></xhtml:div></some_ns_id:some_head_elem>'''.encode('utf-8'))
def serialize_div_element(element):
# for ns_id, ns_uri in element.nsmap.items():
# print(type(ns_id), type(ns_uri), ns_id, '=', ns_uri)
etree.tostring(element, method="c14n2")
tree = etree.parse(source='test.xml')
root = tree.getroot()
div = root.xpath('//xhtml:div', namespaces={'xhtml':'http://www.w3.org/1999/xhtml'})[0]
serialize_div_element(div)
class ETreeWriteTestCase(HelperTestCase):
def test_write(self):
tree = self.parse(_bytes('<a><b/></a>'))
f = BytesIO()
tree.write(f)
s = f.getvalue()
self.assertEqual(_bytes('<a><b/></a>'),
s)
def test_write_doctype(self):
tree = self.parse(_bytes('<a><b/></a>'))
f = BytesIO()
tree.write(f, doctype='HUHU')
s = f.getvalue()
self.assertEqual(_bytes('HUHU\n<a><b/></a>'),
s)
def test_write_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write(f, compression=9)
with gzip.GzipFile(fileobj=BytesIO(f.getvalue())) as gzfile:
s = gzfile.read()
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s)
def test_write_gzip_doctype(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write(f, compression=9, doctype='<!DOCTYPE a>')
with gzip.GzipFile(fileobj=BytesIO(f.getvalue())) as gzfile:
s = gzfile.read()
self.assertEqual(_bytes('<!DOCTYPE a>\n<a>'+'<b/>'*200+'</a>'),
s)
def test_write_gzip_level(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write(f, compression=0)
s0 = f.getvalue()
f = BytesIO()
tree.write(f)
self.assertEqual(f.getvalue(), s0)
f = BytesIO()
tree.write(f, compression=1)
s = f.getvalue()
self.assertTrue(len(s) <= len(s0))
with gzip.GzipFile(fileobj=BytesIO(s)) as gzfile:
s1 = gzfile.read()
f = BytesIO()
tree.write(f, compression=9)
s = f.getvalue()
self.assertTrue(len(s) <= len(s0))
with gzip.GzipFile(fileobj=BytesIO(s)) as gzfile:
s9 = gzfile.read()
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s0)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s1)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s9)
def test_write_file(self):
tree = self.parse(_bytes('<a><b/></a>'))
with tmpfile() as filename:
tree.write(filename)
data = read_file(filename, 'rb')
self.assertEqual(_bytes('<a><b/></a>'),
data)
def test_write_file_pathlike(self):
tree = self.parse(_bytes('<a><b/></a>'))
with tmpfile() as filename:
tree.write(SimpleFSPath(filename))
data = read_file(filename, 'rb')
self.assertEqual(_bytes('<a><b/></a>'),
data)
def test_write_file_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
with tmpfile() as filename:
tree.write(filename, compression=9)
with gzip.open(filename, 'rb') as f:
data = f.read()
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
def test_write_file_gzip_pathlike(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
with tmpfile() as filename:
tree.write(SimpleFSPath(filename), compression=9)
with gzip.open(filename, 'rb') as f:
data = f.read()
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
def test_write_file_gzip_parse(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
with tmpfile() as filename:
tree.write(filename, compression=9)
data = etree.tostring(etree.parse(filename))
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
def test_write_file_gzipfile_parse(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
with tmpfile() as filename:
tree.write(filename, compression=9)
with gzip.GzipFile(filename) as f:
data = etree.tostring(etree.parse(f))
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
def test_write_file_url(self):
xml = _bytes('<a>'+'<b/>'*200+'</a>')
tree = self.parse(xml)
with tmpfile(prefix="p+%20", suffix=".xml") as filename:
url = 'file://' + (filename if sys.platform != 'win32'
else '/' + filename.replace('\\', '/'))
tree.write(url)
data = read_file(filename, 'rb').replace(_bytes('\n'), _bytes(''))
self.assertEqual(data, xml)
class ETreeErrorLogTest(HelperTestCase):
etree = etree
def test_parse_error_logging(self):
parse = self.etree.parse
f = BytesIO('<a><b></c></b></a>')
self.etree.clear_error_log()
try:
parse(f)
logs = None
except SyntaxError:
e = sys.exc_info()[1]
logs = e.error_log
f.close()
self.assertTrue([ log for log in logs
if 'mismatch' in log.message ])
self.assertTrue([ log for log in logs
if 'PARSER' in log.domain_name])
self.assertTrue([ log for log in logs
if 'ERR_TAG_NAME_MISMATCH' in log.type_name ])
self.assertTrue([ log for log in logs
if 1 == log.line ])
self.assertTrue([ log for log in logs
if 15 == log.column ])
def _test_python_error_logging(self):
"""This can't really be tested as long as there isn't a way to
reset the logging setup ...
"""
parse = self.etree.parse
messages = []
class Logger(self.etree.PyErrorLog):
def log(self, entry, message, *args):
messages.append(message)
self.etree.use_global_python_log(Logger())
f = BytesIO('<a><b></c></b></a>')
try:
parse(f)
except SyntaxError:
pass
f.close()
self.assertTrue([ message for message in messages
if 'mismatch' in message ])
self.assertTrue([ message for message in messages
if ':PARSER:' in message])
self.assertTrue([ message for message in messages
if ':ERR_TAG_NAME_MISMATCH:' in message ])
self.assertTrue([ message for message in messages
if ':1:15:' in message ])
class XMLPullParserTest(unittest.TestCase):
etree = etree
def assert_event_tags(self, events, expected):
self.assertEqual([(action, elem.tag) for action, elem in events],
expected)
def test_pull_from_simple_target(self):
class Target(object):
def start(self, tag, attrib):
return 'start(%s)' % tag
def end(self, tag):
return 'end(%s)' % tag
def close(self):
return 'close()'
parser = self.etree.XMLPullParser(target=Target())
events = parser.read_events()
parser.feed('<root><element>')
self.assertFalse(list(events))
self.assertFalse(list(events))
parser.feed('</element><child>')
self.assertEqual([('end', 'end(element)')], list(events))
parser.feed('</child>')
self.assertEqual([('end', 'end(child)')], list(events))
parser.feed('</root>')
self.assertEqual([('end', 'end(root)')], list(events))
self.assertFalse(list(events))
self.assertEqual('close()', parser.close())
def test_pull_from_simple_target_start_end(self):
class Target(object):
def start(self, tag, attrib):
return 'start(%s)' % tag
def end(self, tag):
return 'end(%s)' % tag
def close(self):
return 'close()'
parser = self.etree.XMLPullParser(
['start', 'end'], target=Target())
events = parser.read_events()
parser.feed('<root><element>')
self.assertEqual(
[('start', 'start(root)'), ('start', 'start(element)')],
list(events))
self.assertFalse(list(events))
parser.feed('</element><child>')
self.assertEqual(
[('end', 'end(element)'), ('start', 'start(child)')],
list(events))
parser.feed('</child>')
self.assertEqual(
[('end', 'end(child)')],
list(events))
parser.feed('</root>')
self.assertEqual(
[('end', 'end(root)')],
list(events))
self.assertFalse(list(events))
self.assertEqual('close()', parser.close())
def test_pull_from_tree_builder(self):
parser = self.etree.XMLPullParser(
['start', 'end'], target=etree.TreeBuilder())
events = parser.read_events()
parser.feed('<root><element>')
self.assert_event_tags(
events, [('start', 'root'), ('start', 'element')])
self.assertFalse(list(events))
parser.feed('</element><child>')
self.assert_event_tags(
events, [('end', 'element'), ('start', 'child')])
parser.feed('</child>')
self.assert_event_tags(
events, [('end', 'child')])
parser.feed('</root>')
self.assert_event_tags(
events, [('end', 'root')])
self.assertFalse(list(events))
root = parser.close()
self.assertEqual('root', root.tag)
def test_pull_from_tree_builder_subclass(self):
class Target(etree.TreeBuilder):
def end(self, tag):
el = super(Target, self).end(tag)
el.tag += '-huhu'
return el
parser = self.etree.XMLPullParser(
['start', 'end'], target=Target())
events = parser.read_events()
parser.feed('<root><element>')
self.assert_event_tags(
events, [('start', 'root'), ('start', 'element')])
self.assertFalse(list(events))
parser.feed('</element><child>')
self.assert_event_tags(
events, [('end', 'element-huhu'), ('start', 'child')])
parser.feed('</child>')
self.assert_event_tags(
events, [('end', 'child-huhu')])
parser.feed('</root>')
self.assert_event_tags(
events, [('end', 'root-huhu')])
self.assertFalse(list(events))
root = parser.close()
self.assertEqual('root-huhu', root.tag)
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeOnlyTestCase)])
suite.addTests([unittest.makeSuite(ETreeXIncludeTestCase)])
suite.addTests([unittest.makeSuite(ElementIncludeTestCase)])
suite.addTests([unittest.makeSuite(ETreeC14NTestCase)])
suite.addTests([unittest.makeSuite(ETreeWriteTestCase)])
suite.addTests([unittest.makeSuite(ETreeErrorLogTest)])
suite.addTests([unittest.makeSuite(XMLPullParserTest)])
# add original doctests from ElementTree selftest modules
from . import selftest, selftest2
suite.addTests(doctest.DocTestSuite(selftest))
suite.addTests(doctest.DocTestSuite(selftest2))
# add doctests
suite.addTests(doctest.DocTestSuite(etree))
suite.addTests(
[make_doctest('../../../doc/tutorial.txt')])
suite.addTests(
[make_doctest('../../../doc/api.txt')])
suite.addTests(
[make_doctest('../../../doc/FAQ.txt')])
suite.addTests(
[make_doctest('../../../doc/parsing.txt')])
suite.addTests(
[make_doctest('../../../doc/resolvers.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
PYSEC-2022-230
|
svglib/svglib.py
|
@@ -1403,7 +1403,7 @@ def applyStyleOnShape(self, shape, node, only_explicit=False):
shape.fillColor.alpha = shape.fillOpacity
-def svg2rlg(path, **kwargs):
+def svg2rlg(path, resolve_entities=False, **kwargs):
"Convert an SVG file to an RLG Drawing object."
# unzip .svgz file into .svg
@@ -1414,7 +1414,7 @@ def svg2rlg(path, **kwargs):
path = path[:-1]
unzipped = True
- svg_root = load_svg_file(path)
+ svg_root = load_svg_file(path, resolve_entities=resolve_entities)
if svg_root is None:
return
@@ -1429,8 +1429,10 @@ def svg2rlg(path, **kwargs):
return drawing
-def load_svg_file(path):
- parser = etree.XMLParser(remove_comments=True, recover=True)
+def load_svg_file(path, resolve_entities=False):
+ parser = etree.XMLParser(
+ remove_comments=True, recover=True, resolve_entities=resolve_entities
+ )
try:
doc = etree.parse(path, parser=parser)
svg_root = doc.getroot()
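The hunks above make lxml's external-entity resolution opt-in for svglib. A minimal sketch of why that matters, assuming a hostile SVG along the lines below (the payload and file path are illustrative, not taken from the advisory):

from lxml import etree

# Hypothetical XXE payload: with entity resolution enabled, &xxe; would be
# expanded to the contents of /etc/passwd and end up in the rendered drawing.
evil_svg = b'''<?xml version="1.0"?>
<!DOCTYPE svg [<!ENTITY xxe SYSTEM "file:///etc/passwd">]>
<svg xmlns="http://www.w3.org/2000/svg"><text>&xxe;</text></svg>'''

# Patched behaviour: resolve_entities defaults to False, so the entity is left
# unresolved and no local file content leaks into the output.
parser = etree.XMLParser(remove_comments=True, recover=True, resolve_entities=False)
root = etree.fromstring(evil_svg, parser=parser)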
|
#!/usr/bin/env python
"""A library for reading and converting SVG.
This is a converter from SVG to RLG (ReportLab Graphics) drawings.
It converts mainly basic shapes, paths and simple text. The intended
usage is either as module within other projects:
from svglib.svglib import svg2rlg
drawing = svg2rlg("foo.svg")
or from the command-line where it is usable as an SVG to PDF converting
tool named sv2pdf (which should also handle SVG files compressed with
gzip and extension .svgz).
"""
import copy
import gzip
import itertools
import logging
import os
import re
import base64
import tempfile
import shlex
import shutil
import subprocess
import sys
from collections import defaultdict, namedtuple
from reportlab.pdfbase.pdfmetrics import registerFont, stringWidth
from reportlab.pdfbase.ttfonts import TTFError, TTFont
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
from reportlab.pdfgen.pdfimages import PDFImage
from reportlab.graphics.shapes import (
_CLOSEPATH, Circle, Drawing, Ellipse, Group, Image, Line, Path, PolyLine,
Polygon, Rect, SolidShape, String,
)
from reportlab.lib import colors
from reportlab.lib.units import pica, toLength
from reportlab.lib.utils import haveImages
from lxml import etree
import cssselect2
import tinycss2
from .utils import (
bezier_arc_from_end_points, convert_quadratic_to_cubic_path,
normalise_svg_path,
)
__version__ = '0.9.3'
__license__ = 'LGPL 3'
__author__ = 'Dinu Gherman'
__date__ = '2019-11-02'
XML_NS = 'http://www.w3.org/XML/1998/namespace'
# A sentinel to identify a situation where a node reference a fragment not yet defined.
DELAYED = object()
STANDARD_FONT_NAMES = (
'Times-Roman', 'Times-Italic', 'Times-Bold', 'Times-BoldItalic',
'Helvetica', 'Helvetica-Oblique', 'Helvetica-Bold', 'Helvetica-BoldOblique',
'Courier', 'Courier-Oblique', 'Courier-Bold', 'Courier-BoldOblique',
'Symbol', 'ZapfDingbats',
)
DEFAULT_FONT_NAME = "Helvetica"
_registered_fonts = {}
logger = logging.getLogger(__name__)
Box = namedtuple('Box', ['x', 'y', 'width', 'height'])
split_whitespace = re.compile(r'[^ \t\r\n\f]+').findall
def find_font(font_name):
"""Return the font and a Boolean indicating if the match is exact."""
if font_name in STANDARD_FONT_NAMES:
return font_name, True
elif font_name in _registered_fonts:
return font_name, _registered_fonts[font_name]
NOT_FOUND = (None, False)
try:
# Try first to register the font if it exists as ttf,
# based on ReportLab font search.
registerFont(TTFont(font_name, '%s.ttf' % font_name))
_registered_fonts[font_name] = True
return font_name, True
except TTFError:
# Try searching with Fontconfig
try:
pipe = subprocess.Popen(
['fc-match', '-s', '--format=%{file}\\n', font_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = pipe.communicate()[0].decode(sys.getfilesystemencoding())
font_path = output.split('\n')[0]
except OSError:
return NOT_FOUND
try:
registerFont(TTFont(font_name, font_path))
except TTFError:
return NOT_FOUND
# Fontconfig may return a default font totally unrelated with font_name
exact = font_name.lower() in os.path.basename(font_path).lower()
_registered_fonts[font_name] = exact
return font_name, exact
class NoStrokePath(Path):
"""
This path object never gets a stroke width whatever the properties it's
getting assigned.
"""
def __init__(self, *args, **kwargs):
copy_from = kwargs.pop('copy_from', None)
super().__init__(*args, **kwargs)
if copy_from:
self.__dict__.update(copy.deepcopy(copy_from.__dict__))
def getProperties(self, *args, **kwargs):
# __getattribute__ wouldn't suit, as RL is directly accessing self.__dict__
props = super().getProperties(*args, **kwargs)
if 'strokeWidth' in props:
props['strokeWidth'] = 0
if 'strokeColor' in props:
props['strokeColor'] = None
return props
class ClippingPath(Path):
def __init__(self, *args, **kwargs):
copy_from = kwargs.pop('copy_from', None)
Path.__init__(self, *args, **kwargs)
if copy_from:
self.__dict__.update(copy.deepcopy(copy_from.__dict__))
self.isClipPath = 1
def getProperties(self, *args, **kwargs):
props = Path.getProperties(self, *args, **kwargs)
if 'fillColor' in props:
props['fillColor'] = None
return props
class CSSMatcher(cssselect2.Matcher):
def __init__(self, style_content):
super().__init__()
self.rules = tinycss2.parse_stylesheet(
style_content, skip_comments=True, skip_whitespace=True
)
for rule in self.rules:
if not rule.prelude:
continue
selectors = cssselect2.compile_selector_list(rule.prelude)
selector_string = tinycss2.serialize(rule.prelude)
content_dict = dict(
(attr.split(':')[0].strip(), attr.split(':')[1].strip())
for attr in tinycss2.serialize(rule.content).split(';')
if ':' in attr
)
payload = (selector_string, content_dict)
for selector in selectors:
self.add_selector(selector, payload)
# Attribute converters (from SVG to RLG)
class AttributeConverter:
"An abstract class to locate and convert attributes in a DOM instance."
def __init__(self):
self.css_rules = None
self.main_box = None
def set_box(self, main_box):
self.main_box = main_box
def parseMultiAttributes(self, line):
"""Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'.
"""
attrs = line.split(';')
attrs = [a.strip() for a in attrs]
attrs = filter(lambda a: len(a) > 0, attrs)
new_attrs = {}
for a in attrs:
k, v = a.split(':')
k, v = [s.strip() for s in (k, v)]
new_attrs[k] = v
return new_attrs
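# Illustrative example (not part of the original module): a style string such
# as "fill: red; stroke: blue;" parses to {'fill': 'red', 'stroke': 'blue'}.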
def findAttr(self, svgNode, name):
"""Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned.
"""
# This also needs to look up values like "url(#SomeName)"...
if not svgNode.attrib.get('__rules_applied', False):
# Apply global styles...
if self.css_rules is not None:
if isinstance(svgNode, NodeTracker):
svgNode.apply_rules(self.css_rules)
else:
ElementWrapper(svgNode).apply_rules(self.css_rules)
# ...and locally defined
if svgNode.attrib.get("style"):
attrs = self.parseMultiAttributes(svgNode.attrib.get("style"))
for key, val in attrs.items():
# lxml nodes cannot accept attributes starting with '-'
if not key.startswith('-'):
svgNode.attrib[key] = val
svgNode.attrib['__rules_applied'] = '1'
attr_value = svgNode.attrib.get(name, '').strip()
if attr_value and attr_value != "inherit":
return attr_value
if svgNode.getparent() is not None:
return self.findAttr(svgNode.getparent(), name)
return ''
def getAllAttributes(self, svgNode):
"Return a dictionary of all attributes of svgNode or those inherited by it."
dict = {}
if node_name(svgNode.getparent()) == 'g':
dict.update(self.getAllAttributes(svgNode.getparent()))
style = svgNode.attrib.get("style")
if style:
d = self.parseMultiAttributes(style)
dict.update(d)
for key, value in svgNode.attrib.items():
if key != "style":
dict[key] = value
return dict
def id(self, svgAttr):
"Return attribute as is."
return svgAttr
def convertTransform(self, svgAttr):
"""Parse transform attribute string.
E.g. "scale(2) translate(10,20)"
-> [("scale", 2), ("translate", (10,20))]
"""
line = svgAttr.strip()
ops = line[:]
brackets = []
indices = []
for i, lin in enumerate(line):
if lin in "()":
brackets.append(i)
for i in range(0, len(brackets), 2):
bi, bj = brackets[i], brackets[i+1]
subline = line[bi+1:bj]
subline = subline.strip()
subline = subline.replace(',', ' ')
subline = re.sub("[ ]+", ',', subline)
try:
if ',' in subline:
indices.append(tuple(float(num) for num in subline.split(',')))
else:
indices.append(float(subline))
except ValueError:
continue
ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
ops = ops.replace(',', ' ').split()
if len(ops) != len(indices):
logger.warning("Unable to parse transform expression '%s'" % svgAttr)
return []
result = []
for i, op in enumerate(ops):
result.append((op, indices[i]))
return result
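# Illustrative example (not part of the original module):
#   convertTransform("scale(2) translate(10,20)")
# returns [("scale", 2.0), ("translate", (10.0, 20.0))], which
# applyTransformOnGroup() later replays onto a reportlab Group.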
class Svg2RlgAttributeConverter(AttributeConverter):
"A concrete SVG to RLG attribute converter."
def __init__(self, color_converter=None):
super().__init__()
self.color_converter = color_converter or self.identity_color_converter
@staticmethod
def identity_color_converter(c):
return c
@staticmethod
def split_attr_list(attr):
return shlex.split(attr.strip().replace(',', ' '))
def convertLength(self, svgAttr, em_base=12, attr_name=None, default=0.0):
"Convert length to points."
text = svgAttr.replace(',', ' ').strip()
if not text:
return default
if ' ' in text:
# Multiple length values, returning a list
return [
self.convertLength(val, em_base=em_base, attr_name=attr_name, default=default)
for val in self.split_attr_list(text)
]
if text.endswith('%'):
if self.main_box is None:
logger.error("Unable to resolve percentage unit without a main box")
return float(text[:-1])
if attr_name is None:
logger.error("Unable to resolve percentage unit without knowing the node name")
return float(text[:-1])
if attr_name in ('x', 'cx', 'x1', 'x2', 'width'):
full = self.main_box.width
elif attr_name in ('y', 'cy', 'y1', 'y2', 'height'):
full = self.main_box.height
else:
logger.error("Unable to detect if node '%s' is width or height" % attr_name)
return float(text[:-1])
return float(text[:-1]) / 100 * full
elif text.endswith("pc"):
return float(text[:-2]) * pica
elif text.endswith("pt"):
return float(text[:-2]) * 1.25
elif text.endswith("em"):
return float(text[:-2]) * em_base
elif text.endswith("px"):
return float(text[:-2])
if "ex" in text:
logger.warning("Ignoring unit ex")
text = text.replace("ex", '')
text = text.strip()
length = toLength(text) # this does the default measurements such as mm and cm
return length
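# Illustrative examples (not part of the original module):
#   convertLength("10pt")                    -> 12.5  (pt values are scaled by 1.25)
#   convertLength("2em", em_base=14)         -> 28.0
#   convertLength("50%", attr_name="width")  -> half of the main viewBox width,
#                                               once set_box() has been called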
def convertLengthList(self, svgAttr):
"""Convert a list of lengths."""
return [self.convertLength(a) for a in self.split_attr_list(svgAttr)]
def convertOpacity(self, svgAttr):
return float(svgAttr)
def convertFillRule(self, svgAttr):
return {
'nonzero': FILL_NON_ZERO,
'evenodd': FILL_EVEN_ODD,
}.get(svgAttr, '')
def convertColor(self, svgAttr):
"Convert string to a RL color object."
# This also needs to look up values like "url(#SomeName)"...
text = svgAttr
if not text or text == "none":
return None
if text == "currentColor":
return "currentColor"
if len(text) in (7, 9) and text[0] == '#':
color = colors.HexColor(text, hasAlpha=len(text) == 9)
elif len(text) == 4 and text[0] == '#':
color = colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3])
elif len(text) == 5 and text[0] == '#':
color = colors.HexColor(
'#' + 2*text[1] + 2*text[2] + 2*text[3] + 2*text[4], hasAlpha=True
)
else:
# Should handle pcmyk|cmyk|rgb|hsl values (including 'a' for alpha)
color = colors.cssParse(text)
if color is None:
# Test if text is a predefined color constant
try:
color = getattr(colors, text)
except AttributeError:
pass
if color is None:
logger.warning("Can't handle color: %s" % text)
else:
return self.color_converter(color)
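# Illustrative examples (not part of the original module):
#   convertColor("#f80")  -> colors.HexColor("#ff8800")  (short hex is expanded)
#   convertColor("none")  -> None
#   convertColor("red")   -> reportlab's named color, via cssParse or the
#                            colors-module fallback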
def convertLineJoin(self, svgAttr):
return {"miter": 0, "round": 1, "bevel": 2}[svgAttr]
def convertLineCap(self, svgAttr):
return {"butt": 0, "round": 1, "square": 2}[svgAttr]
def convertDashArray(self, svgAttr):
strokeDashArray = self.convertLengthList(svgAttr)
return strokeDashArray
def convertDashOffset(self, svgAttr):
strokeDashOffset = self.convertLength(svgAttr)
return strokeDashOffset
def convertFontFamily(self, svgAttr):
if not svgAttr:
return ''
# very hackish
font_mapping = {
"sans-serif": "Helvetica",
"serif": "Times-Roman",
"times": "Times-Roman",
"monospace": "Courier",
}
font_names = [
font_mapping.get(font_name.lower(), font_name)
for font_name in self.split_attr_list(svgAttr)
]
non_exact_matches = []
for font_name in font_names:
font_name, exact = find_font(font_name)
if exact:
return font_name
elif font_name:
non_exact_matches.append(font_name)
if non_exact_matches:
return non_exact_matches[0]
else:
logger.warning("Unable to find a suitable font for 'font-family:%s'" % svgAttr)
return DEFAULT_FONT_NAME
class ElementWrapper:
"""
lxml element wrapper to partially match the API from cssselect2.ElementWrapper
so as element can be passed to rules.match().
"""
in_html_document = False
def __init__(self, obj):
self.object = obj
@property
def id(self):
return self.object.attrib.get('id')
@property
def etree_element(self):
return self.object
@property
def parent(self):
par = self.object.getparent()
return ElementWrapper(par) if par is not None else None
@property
def classes(self):
cl = self.object.attrib.get('class')
return split_whitespace(cl) if cl is not None else []
@property
def local_name(self):
return node_name(self.object)
@property
def namespace_url(self):
if '}' in self.object.tag:
return self.object.tag.split('}')[0][1:]
def iter_ancestors(self):
element = self
while element.parent is not None:
element = element.parent
yield element
def apply_rules(self, rules):
matches = rules.match(self)
for match in matches:
attr_dict = match[3][1]
for attr, val in attr_dict.items():
if attr not in self.object.attrib:
try:
self.object.attrib[attr] = val
except ValueError:
pass
# Set marker on the node to not apply rules more than once
self.object.set('__rules_applied', '1')
class NodeTracker(ElementWrapper):
"""An object wrapper keeping track of arguments to certain method calls.
Instances wrap an object and store all arguments to one special
method, getAttribute(name), in a list of unique elements, usedAttrs.
"""
def __init__(self, obj):
super().__init__(obj)
self.usedAttrs = []
def __repr__(self):
return '<NodeTracker for node %s>' % self.object
def getAttribute(self, name):
# add argument to the history, if not already present
if name not in self.usedAttrs:
self.usedAttrs.append(name)
# forward call to wrapped object
return self.object.attrib.get(name, '')
def __getattr__(self, name):
# forward attribute access to wrapped object
return getattr(self.object, name)
class CircularRefError(Exception):
pass
class ExternalSVG:
def __init__(self, path, renderer):
self.root_node = load_svg_file(path)
self.renderer = SvgRenderer(
path, parent_svgs=renderer._parent_chain + [renderer.source_path]
)
self.rendered = False
def get_fragment(self, fragment):
if not self.rendered:
self.renderer.render(self.root_node)
self.rendered = True
return self.renderer.definitions.get(fragment)
# ## the main meat ###
class SvgRenderer:
"""Renderer that renders an SVG file on a ReportLab Drawing instance.
This is the base class for walking over an SVG DOM document and
transforming it into a ReportLab Drawing instance.
"""
def __init__(self, path, color_converter=None, parent_svgs=None):
self.source_path = path
self._parent_chain = parent_svgs or [] # To detect circular refs.
self.attrConverter = Svg2RlgAttributeConverter(color_converter=color_converter)
self.shape_converter = Svg2RlgShapeConverter(path, self.attrConverter)
self.handled_shapes = self.shape_converter.get_handled_shapes()
self.definitions = {}
self.waiting_use_nodes = defaultdict(list)
self._external_svgs = {}
def render(self, svg_node):
node = NodeTracker(svg_node)
view_box = self.get_box(node, default_box=True)
# Knowing the main box is useful for percentage units
self.attrConverter.set_box(view_box)
main_group = self.renderSvg(node, outermost=True)
for xlink in self.waiting_use_nodes.keys():
logger.debug("Ignoring unavailable object width ID '%s'." % xlink)
main_group.translate(0 - view_box.x, -view_box.height - view_box.y)
width, height = self.shape_converter.convert_length_attrs(
svg_node, "width", "height", defaults=(view_box.width, view_box.height)
)
drawing = Drawing(width, height)
drawing.add(main_group)
return drawing
def renderNode(self, node, parent=None):
n = NodeTracker(node)
nid = n.getAttribute("id")
ignored = False
item = None
name = node_name(node)
clipping = self.get_clippath(n)
if name == "svg":
item = self.renderSvg(n)
parent.add(item)
elif name == "defs":
ignored = True # defs are handled in the initial rendering phase.
elif name == 'a':
item = self.renderA(n)
parent.add(item)
elif name == 'g':
display = n.getAttribute("display")
item = self.renderG(n, clipping=clipping)
if display != "none":
parent.add(item)
elif name == "style":
self.renderStyle(n)
elif name == "symbol":
item = self.renderSymbol(n)
parent.add(item)
elif name == "use":
item = self.renderUse(n, clipping=clipping)
parent.add(item)
elif name == "clipPath":
item = self.renderG(n)
elif name in self.handled_shapes:
if name == 'image':
# We resolve the image target at renderer level because it can point
# to another SVG file or node which has to be rendered too.
target = self.xlink_href_target(n)
if target is None:
return
elif isinstance(target, tuple):
# This is SVG content needed to be rendered
gr = Group()
renderer, node = target
renderer.renderNode(node, parent=gr)
self.apply_node_attr_to_group(n, gr)
parent.add(gr)
return
else:
# Attaching target to node, so we can get it back in convertImage
n._resolved_target = target
item = self.shape_converter.convertShape(name, n, clipping)
display = n.getAttribute("display")
if item and display != "none":
parent.add(item)
else:
ignored = True
logger.debug("Ignoring node: %s" % name)
if not ignored:
if nid and item:
self.definitions[nid] = node
if nid in self.waiting_use_nodes.keys():
to_render = self.waiting_use_nodes.pop(nid)
for use_node, group in to_render:
self.renderUse(use_node, group=group)
self.print_unused_attributes(node, n)
def get_clippath(self, node):
"""
Return the clipping Path object referenced by the node 'clip-path'
attribute, if any.
"""
def get_shape_from_group(group):
for elem in group.contents:
if isinstance(elem, Group):
return get_shape_from_group(elem)
elif isinstance(elem, SolidShape):
return elem
def get_shape_from_node(node):
for child in node.getchildren():
if node_name(child) == 'path':
group = self.shape_converter.convertShape('path', NodeTracker(child))
return group.contents[-1]
elif node_name(child) == 'use':
grp = self.renderUse(NodeTracker(child))
return get_shape_from_group(grp)
elif node_name(child) == 'rect':
return self.shape_converter.convertRect(NodeTracker(child))
else:
return get_shape_from_node(child)
clip_path = node.getAttribute('clip-path')
if not clip_path:
return
m = re.match(r'url\(#([^\)]*)\)', clip_path)
if not m:
return
ref = m.groups()[0]
if ref not in self.definitions:
logger.warning("Unable to find a clipping path with id %s" % ref)
return
shape = get_shape_from_node(self.definitions[ref])
if isinstance(shape, Rect):
# It is possible to use a rect as a clipping path in an svg, so we
# need to convert it to a path for rlg.
x1, y1, x2, y2 = shape.getBounds()
cp = ClippingPath()
cp.moveTo(x1, y1)
cp.lineTo(x2, y1)
cp.lineTo(x2, y2)
cp.lineTo(x1, y2)
cp.closePath()
# Copy the styles from the rect to the clipping path.
copy_shape_properties(shape, cp)
return cp
elif isinstance(shape, Path):
return ClippingPath(copy_from=shape)
elif shape:
logging.error("Unsupported shape type %s for clipping" % shape.__class__.__name__)
def print_unused_attributes(self, node, n):
if logger.level > logging.DEBUG:
return
all_attrs = self.attrConverter.getAllAttributes(node).keys()
unused_attrs = [attr for attr in all_attrs if attr not in n.usedAttrs]
if unused_attrs:
logger.debug("Unused attrs: %s %s" % (node_name(n), unused_attrs))
def apply_node_attr_to_group(self, node, group):
getAttr = node.getAttribute
transform, x, y = map(getAttr, ("transform", "x", "y"))
if x or y:
transform += " translate(%s, %s)" % (x or '0', y or '0')
if transform:
self.shape_converter.applyTransformOnGroup(transform, group)
def xlink_href_target(self, node, group=None):
"""
Return either:
- a tuple (renderer, node) when the xlink:href attribute targets
a vector file or node
- the path to an image file for any raster image targets
- None if any problem occurs
"""
xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
if not xlink_href:
return None
# First handle any raster embedded image data
match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
if match:
img_format = match.groups()[0]
image_data = base64.decodebytes(xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
with open(path, 'wb') as fh:
fh.write(image_data)
# Close temporary file (as opened by tempfile.mkstemp)
os.close(file_indicator)
# this needs to be removed later, not here...
# if exists(path): os.remove(path)
return path
# From here, we can assume this is a path.
if '#' in xlink_href:
iri, fragment = xlink_href.split('#', 1)
else:
iri, fragment = xlink_href, None
if iri:
# Only local relative paths are supported yet
if not isinstance(self.source_path, str):
logger.error(
"Unable to resolve image path '%s' as the SVG source is not "
"a file system path." % iri
)
return None
path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
if not os.access(path, os.R_OK):
return None
if path == self.source_path:
# Self-referencing, ignore the IRI part
iri = None
if iri:
if path.endswith('.svg'):
if path in self._parent_chain:
logger.error("Circular reference detected in file.")
raise CircularRefError()
if path not in self._external_svgs:
self._external_svgs[path] = ExternalSVG(path, self)
ext_svg = self._external_svgs[path]
if ext_svg.root_node is not None:
if fragment:
ext_frag = ext_svg.get_fragment(fragment)
if ext_frag is not None:
return ext_svg.renderer, ext_frag
else:
return ext_svg.renderer, ext_svg.root_node
else:
# A raster image path
try:
# This will catch invalid images
PDFImage(path, 0, 0)
except IOError:
logger.error("Unable to read the image %s. Skipping..." % path)
return None
return path
elif fragment:
# A pointer to an internal definition
if fragment in self.definitions:
return self, self.definitions[fragment]
else:
# The missing definition should appear later in the file
self.waiting_use_nodes[fragment].append((node, group))
return DELAYED
def renderTitle_(self, node):
# Main SVG title attr. could be used in the PDF document info field.
pass
def renderDesc_(self, node):
# Main SVG desc. attr. could be used in the PDF document info field.
pass
def get_box(self, svg_node, default_box=False):
view_box = svg_node.getAttribute("viewBox")
if view_box:
view_box = self.attrConverter.convertLengthList(view_box)
return Box(*view_box)
if default_box:
width, height = map(svg_node.getAttribute, ("width", "height"))
width, height = map(self.attrConverter.convertLength, (width, height))
return Box(0, 0, width, height)
def renderSvg(self, node, outermost=False):
_saved_preserve_space = self.shape_converter.preserve_space
self.shape_converter.preserve_space = node.getAttribute("{%s}space" % XML_NS) == 'preserve'
# Rendering all definition nodes first.
svg_ns = node.nsmap.get(None)
for def_node in node.iterdescendants('{%s}defs' % svg_ns if svg_ns else 'defs'):
self.renderG(NodeTracker(def_node))
group = Group()
for child in node.getchildren():
self.renderNode(child, group)
self.shape_converter.preserve_space = _saved_preserve_space
# Translating
if not outermost:
x, y = self.shape_converter.convert_length_attrs(node, "x", "y")
if x or y:
group.translate(x or 0, y or 0)
# Scaling
view_box = self.get_box(node)
if not view_box and outermost:
# Apply only the 'reverse' y-scaling (PDF 0,0 is bottom left)
group.scale(1, -1)
elif view_box:
x_scale, y_scale = 1, 1
width, height = self.shape_converter.convert_length_attrs(
node, "width", "height", defaults=(None,) * 2
)
if height is not None and view_box.height != height:
y_scale = height / view_box.height
if width is not None and view_box.width != width:
x_scale = width / view_box.width
group.scale(x_scale, y_scale * (-1 if outermost else 1))
return group
def renderG(self, node, clipping=None, display=1):
getAttr = node.getAttribute
id, transform = map(getAttr, ("id", "transform"))
gr = Group()
if clipping:
gr.add(clipping)
for child in node.getchildren():
item = self.renderNode(child, parent=gr)
if item and display:
gr.add(item)
if transform:
self.shape_converter.applyTransformOnGroup(transform, gr)
return gr
def renderStyle(self, node):
self.attrConverter.css_rules = CSSMatcher(node.text)
def renderSymbol(self, node):
return self.renderG(node, display=0)
def renderA(self, node):
# currently nothing but a group...
# there is no linking info stored in shapes, maybe a group should?
return self.renderG(node)
def renderUse(self, node, group=None, clipping=None):
if group is None:
group = Group()
try:
item = self.xlink_href_target(node, group=group)
except CircularRefError:
node.parent.object.remove(node.object)
return group
if item is None:
return
elif isinstance(item, str):
logger.error("<use> nodes cannot reference bitmap image files")
return
elif item is DELAYED:
return group
else:
item = item[1] # [0] is the renderer, not used here.
if clipping:
group.add(clipping)
if len(node.getchildren()) == 0:
# Append a copy of the referenced node as the <use> child (if not already done)
node.append(copy.deepcopy(item))
self.renderNode(node.getchildren()[-1], parent=group)
self.apply_node_attr_to_group(node, group)
return group
class SvgShapeConverter:
"""An abstract SVG shape converter.
Implement subclasses with methods named 'convertX(node)', where
'X' should be the capitalised name of an SVG node element for
shapes, like 'Rect', 'Circle', 'Line', etc.
Each of these methods should return a shape object appropriate
for the target format.
"""
def __init__(self, path, attrConverter=None):
self.attrConverter = attrConverter or Svg2RlgAttributeConverter()
self.svg_source_file = path
self.preserve_space = False
@classmethod
def get_handled_shapes(cls):
"""Dynamically determine a list of handled shape elements based on
convert<shape> method existence.
"""
return [key[7:].lower() for key in dir(cls) if key.startswith('convert')]
class Svg2RlgShapeConverter(SvgShapeConverter):
"""Converter from SVG shapes to RLG (ReportLab Graphics) shapes."""
def convertShape(self, name, node, clipping=None):
method_name = "convert%s" % name.capitalize()
shape = getattr(self, method_name)(node)
if not shape:
return
if name not in ('path', 'polyline', 'text'):
# Only apply style where the convert method did not apply it.
self.applyStyleOnShape(shape, node)
transform = node.getAttribute("transform")
if not (transform or clipping):
return shape
else:
group = Group()
if transform:
self.applyTransformOnGroup(transform, group)
if clipping:
group.add(clipping)
group.add(shape)
return group
def convert_length_attrs(self, node, *attrs, em_base=None, **kwargs):
# Support node both as NodeTracker or lxml node
getAttr = (
node.getAttribute if hasattr(node, 'getAttribute')
else lambda attr: node.attrib.get(attr, '')
)
convLength = self.attrConverter.convertLength
defaults = kwargs.get('defaults', (0.0,) * len(attrs))
return [
convLength(getAttr(attr), attr_name=attr, em_base=em_base, default=default)
for attr, default in zip(attrs, defaults)
]
def convertLine(self, node):
x1, y1, x2, y2 = self.convert_length_attrs(node, 'x1', 'y1', 'x2', 'y2')
return Line(x1, y1, x2, y2)
def convertRect(self, node):
x, y, width, height, rx, ry = self.convert_length_attrs(
node, 'x', 'y', 'width', 'height', 'rx', 'ry'
)
return Rect(x, y, width, height, rx=rx, ry=ry)
def convertCircle(self, node):
# not rendered if r == 0, error if r < 0.
cx, cy, r = self.convert_length_attrs(node, 'cx', 'cy', 'r')
return Circle(cx, cy, r)
def convertEllipse(self, node):
cx, cy, rx, ry = self.convert_length_attrs(node, 'cx', 'cy', 'rx', 'ry')
width, height = rx, ry
return Ellipse(cx, cy, width, height)
def convertPolyline(self, node):
points = node.getAttribute("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polyline
return None
polyline = PolyLine(points)
self.applyStyleOnShape(polyline, node)
has_fill = self.attrConverter.findAttr(node, 'fill') not in ('', 'none')
if has_fill:
# ReportLab doesn't fill polylines, so we are creating a polygon
# copy of the polyline, but without stroke.
group = Group()
polygon = Polygon(points)
self.applyStyleOnShape(polygon, node)
polygon.strokeColor = None
group.add(polygon)
group.add(polyline)
return group
return polyline
def convertPolygon(self, node):
points = node.getAttribute("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polygon
return None
shape = Polygon(points)
return shape
def clean_text(self, text, preserve_space):
"""Text cleaning as per https://www.w3.org/TR/SVG/text.html#WhiteSpace
"""
if text is None:
return
if preserve_space:
text = text.replace('\r\n', ' ').replace('\n', ' ').replace('\t', ' ')
else:
text = text.replace('\r\n', '').replace('\n', '').replace('\t', ' ')
text = text.strip()
while (' ' in text):
text = text.replace(' ', ' ')
return text
def convertText(self, node):
attrConv = self.attrConverter
xml_space = node.getAttribute("{%s}space" % XML_NS)
if xml_space:
preserve_space = xml_space == 'preserve'
else:
preserve_space = self.preserve_space
gr = Group()
frag_lengths = []
dx0, dy0 = 0, 0
x1, y1 = 0, 0
ff = attrConv.findAttr(node, "font-family") or DEFAULT_FONT_NAME
ff = attrConv.convertFontFamily(ff)
fs = attrConv.findAttr(node, "font-size") or "12"
fs = attrConv.convertLength(fs)
x, y = self.convert_length_attrs(node, 'x', 'y', em_base=fs)
for c in itertools.chain([node], node.getchildren()):
has_x, has_y = False, False
dx, dy = 0, 0
baseLineShift = 0
if node_name(c) in ('text', 'tspan'):
text = self.clean_text(c.text, preserve_space)
if not text:
continue
x1, y1, dx, dy = self.convert_length_attrs(c, 'x', 'y', 'dx', 'dy', em_base=fs)
has_x, has_y = (c.attrib.get('x', '') != '', c.attrib.get('y', '') != '')
dx0 = dx0 + (dx[0] if isinstance(dx, list) else dx)
dy0 = dy0 + (dy[0] if isinstance(dy, list) else dy)
baseLineShift = c.attrib.get("baseline-shift", '0')
if baseLineShift in ("sub", "super", "baseline"):
baseLineShift = {"sub": -fs/2, "super": fs/2, "baseline": 0}[baseLineShift]
else:
baseLineShift = attrConv.convertLength(baseLineShift, em_base=fs)
else:
continue
frag_lengths.append(stringWidth(text, ff, fs))
# When x, y, dx, or dy is a list, we calculate position for each char of text.
if any(isinstance(val, list) for val in (x1, y1, dx, dy)):
if has_x:
xlist = x1 if isinstance(x1, list) else [x1]
else:
xlist = [x + dx0 + sum(frag_lengths[:-1])]
if has_y:
ylist = y1 if isinstance(y1, list) else [y1]
else:
ylist = [y + dy0]
dxlist = dx if isinstance(dx, list) else [dx]
dylist = dy if isinstance(dy, list) else [dy]
last_x, last_y, last_char = xlist[0], ylist[0], ''
for char_x, char_y, char_dx, char_dy, char in itertools.zip_longest(
xlist, ylist, dxlist, dylist, text):
if char is None:
break
if char_dx is None:
char_dx = 0
if char_dy is None:
char_dy = 0
new_x = char_dx + (
last_x + stringWidth(last_char, ff, fs) if char_x is None else char_x
)
new_y = char_dy + (last_y if char_y is None else char_y)
shape = String(new_x, -(new_y - baseLineShift), char)
self.applyStyleOnShape(shape, node)
if node_name(c) == 'tspan':
self.applyStyleOnShape(shape, c)
gr.add(shape)
last_x = new_x
last_y = new_y
last_char = char
else:
new_x = (x1 + dx) if has_x else (x + dx0 + sum(frag_lengths[:-1]))
new_y = (y1 + dy) if has_y else (y + dy0)
shape = String(new_x, -(new_y - baseLineShift), text)
self.applyStyleOnShape(shape, node)
if node_name(c) == 'tspan':
self.applyStyleOnShape(shape, c)
gr.add(shape)
gr.scale(1, -1)
return gr
def convertPath(self, node):
d = node.getAttribute('d')
if not d:
return None
normPath = normalise_svg_path(d)
path = Path()
points = path.points
# Track subpaths needing to be closed later
unclosed_subpath_pointers = []
subpath_start = []
lastop = ''
for i in range(0, len(normPath), 2):
op, nums = normPath[i:i+2]
if op in ('m', 'M') and i > 0 and path.operators[-1] != _CLOSEPATH:
unclosed_subpath_pointers.append(len(path.operators))
# moveto absolute
if op == 'M':
path.moveTo(*nums)
subpath_start = points[-2:]
# lineto absolute
elif op == 'L':
path.lineTo(*nums)
# moveto relative
elif op == 'm':
if len(points) >= 2:
if lastop in ('Z', 'z'):
starting_point = subpath_start
else:
starting_point = points[-2:]
xn, yn = starting_point[0] + nums[0], starting_point[1] + nums[1]
path.moveTo(xn, yn)
else:
path.moveTo(*nums)
subpath_start = points[-2:]
# lineto relative
elif op == 'l':
xn, yn = points[-2] + nums[0], points[-1] + nums[1]
path.lineTo(xn, yn)
# horizontal/vertical line absolute
elif op == 'H':
path.lineTo(nums[0], points[-1])
elif op == 'V':
path.lineTo(points[-2], nums[0])
# horizontal/vertical line relative
elif op == 'h':
path.lineTo(points[-2] + nums[0], points[-1])
elif op == 'v':
path.lineTo(points[-2], points[-1] + nums[0])
# cubic bezier, absolute
elif op == 'C':
path.curveTo(*nums)
elif op == 'S':
x2, y2, xn, yn = nums
if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
path.curveTo(xi, yi, x2, y2, xn, yn)
# cubic bezier, relative
elif op == 'c':
xp, yp = points[-2:]
x1, y1, x2, y2, xn, yn = nums
path.curveTo(xp + x1, yp + y1, xp + x2, yp + y2, xp + xn, yp + yn)
elif op == 's':
x2, y2, xn, yn = nums
if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
path.curveTo(xi, yi, x0 + x2, y0 + y2, x0 + xn, y0 + yn)
# quadratic bezier, absolute
elif op == 'Q':
x0, y0 = points[-2:]
x1, y1, xn, yn = nums
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
elif op == 'T':
if len(points) < 4:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
xn, yn = nums
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
# quadratic bezier, relative
elif op == 'q':
x0, y0 = points[-2:]
x1, y1, xn, yn = nums
x1, y1, xn, yn = x0 + x1, y0 + y1, x0 + xn, y0 + yn
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
elif op == 't':
if len(points) < 4:
xp, yp, x0, y0 = points[-2:] * 2
else:
xp, yp, x0, y0 = points[-4:]
x0, y0 = points[-2:]
xn, yn = nums
xn, yn = x0 + xn, y0 + yn
xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
(x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
path.curveTo(x1, y1, x2, y2, xn, yn)
# elliptical arc
elif op in ('A', 'a'):
rx, ry, phi, fA, fS, x2, y2 = nums
x1, y1 = points[-2:]
if op == 'a':
x2 += x1
y2 += y1
if abs(rx) <= 1e-10 or abs(ry) <= 1e-10:
path.lineTo(x2, y2)
else:
bp = bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2)
for _, _, x1, y1, x2, y2, xn, yn in bp:
path.curveTo(x1, y1, x2, y2, xn, yn)
# close path
elif op in ('Z', 'z'):
path.closePath()
else:
logger.debug("Suspicious path operator: %s" % op)
lastop = op
gr = Group()
self.applyStyleOnShape(path, node)
if path.operators[-1] != _CLOSEPATH:
unclosed_subpath_pointers.append(len(path.operators))
if unclosed_subpath_pointers and path.fillColor is not None:
# ReportLab doesn't fill unclosed paths, so we are creating a copy
# of the path with all subpaths closed, but without stroke.
# https://bitbucket.org/rptlab/reportlab/issues/99/
closed_path = NoStrokePath(copy_from=path)
for pointer in reversed(unclosed_subpath_pointers):
closed_path.operators.insert(pointer, _CLOSEPATH)
gr.add(closed_path)
path.fillColor = None
gr.add(path)
return gr
def convertImage(self, node):
if not haveImages:
logger.warning(
"Unable to handle embedded images. Maybe the pillow library is missing?"
)
return None
x, y, width, height = self.convert_length_attrs(node, 'x', 'y', 'width', 'height')
image = node._resolved_target
image = Image(int(x), int(y + height), int(width), int(height), image)
group = Group(image)
group.translate(0, (y + height) * 2)
group.scale(1, -1)
return group
def applyTransformOnGroup(self, transform, group):
"""Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
"""
tr = self.attrConverter.convertTransform(transform)
for op, values in tr:
if op == "scale":
if not isinstance(values, tuple):
values = (values, values)
group.scale(*values)
elif op == "translate":
if isinstance(values, (int, float)):
# From the SVG spec: If <ty> is not provided, it is assumed to be zero.
values = values, 0
group.translate(*values)
elif op == "rotate":
if not isinstance(values, tuple) or len(values) == 1:
group.rotate(values)
elif len(values) == 3:
angle, cx, cy = values
group.translate(cx, cy)
group.rotate(angle)
group.translate(-cx, -cy)
elif op == "skewX":
group.skew(values, 0)
elif op == "skewY":
group.skew(0, values)
elif op == "matrix":
group.transform = values
else:
logger.debug("Ignoring transform: %s %s" % (op, values))
def applyStyleOnShape(self, shape, node, only_explicit=False):
"""
Apply styles from an SVG element to an RLG shape.
If only_explicit is True, only attributes really present are applied.
"""
# RLG-specific: all RLG shapes
"Apply style attributes of a sequence of nodes to an RL shape."
# tuple format: (svgAttr, rlgAttr, converter, default)
mappingN = (
("fill", "fillColor", "convertColor", "black"),
("fill-opacity", "fillOpacity", "convertOpacity", 1),
("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
("stroke", "strokeColor", "convertColor", "none"),
("stroke-width", "strokeWidth", "convertLength", "1"),
("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
)
mappingF = (
("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
("font-size", "fontSize", "convertLength", "12"),
("text-anchor", "textAnchor", "id", "start"),
)
if shape.__class__ == Group:
# Recursively apply style on Group subelements
for subshape in shape.contents:
self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
return
ac = self.attrConverter
for mapping in (mappingN, mappingF):
if shape.__class__ != String and mapping == mappingF:
continue
for (svgAttrName, rlgAttr, func, default) in mapping:
svgAttrValue = ac.findAttr(node, svgAttrName)
if svgAttrValue == '':
if only_explicit:
continue
else:
svgAttrValue = default
if svgAttrValue == "currentColor":
svgAttrValue = ac.findAttr(node.getparent(), "color") or default
try:
meth = getattr(ac, func)
setattr(shape, rlgAttr, meth(svgAttrValue))
except (AttributeError, KeyError, ValueError):
pass
if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
shape.fillColor.alpha = shape.fillOpacity
def svg2rlg(path, **kwargs):
"Convert an SVG file to an RLG Drawing object."
# unzip .svgz file into .svg
unzipped = False
if isinstance(path, str) and os.path.splitext(path)[1].lower() == ".svgz":
with gzip.open(path, 'rb') as f_in, open(path[:-1], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
path = path[:-1]
unzipped = True
svg_root = load_svg_file(path)
if svg_root is None:
return
# convert to a RLG drawing
svgRenderer = SvgRenderer(path, **kwargs)
drawing = svgRenderer.render(svg_root)
# remove unzipped .svgz file (.svg)
if unzipped:
os.remove(path)
return drawing
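# Illustrative usage (not part of the original module), assuming reportlab's
# renderPDF backend is available:
#   from svglib.svglib import svg2rlg
#   from reportlab.graphics import renderPDF
#   drawing = svg2rlg("foo.svg")
#   renderPDF.drawToFile(drawing, "foo.pdf")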
def load_svg_file(path):
parser = etree.XMLParser(remove_comments=True, recover=True)
try:
doc = etree.parse(path, parser=parser)
svg_root = doc.getroot()
except Exception as exc:
logger.error("Failed to load input file! (%s)" % exc)
else:
return svg_root
def node_name(node):
"""Return lxml node name without the namespace prefix."""
try:
return node.tag.split('}')[-1]
except AttributeError:
pass
def copy_shape_properties(source_shape, dest_shape):
for prop, val in source_shape.getProperties().items():
try:
setattr(dest_shape, prop, val)
except AttributeError:
pass
def monkeypatch_reportlab():
"""
https://bitbucket.org/rptlab/reportlab/issues/95/
ReportLab always use 'Even-Odd' filling mode for paths, this patch forces
RL to honor the path fill rule mode (possibly 'Non-Zero Winding') instead.
"""
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics import shapes
original_renderPath = shapes._renderPath
def patchedRenderPath(path, drawFuncs, **kwargs):
# Patched method to transfer fillRule from Path to PDFPathObject
# Get back from bound method to instance
try:
drawFuncs[0].__self__.fillMode = path._fillRule
except AttributeError:
pass
return original_renderPath(path, drawFuncs, **kwargs)
shapes._renderPath = patchedRenderPath
original_drawPath = Canvas.drawPath
def patchedDrawPath(self, path, **kwargs):
current = self._fillMode
if hasattr(path, 'fillMode'):
self._fillMode = path.fillMode
else:
self._fillMode = FILL_NON_ZERO
original_drawPath(self, path, **kwargs)
self._fillMode = current
Canvas.drawPath = patchedDrawPath
monkeypatch_reportlab()
|
GHSA-3vcg-8p79-jpcv
|
mistral/event_engine/default_event_engine.py
|
@@ -23,7 +23,6 @@
from oslo_service import threadgroup
from oslo_utils import fnmatch
import six
-import yaml
from mistral import context as auth_ctx
from mistral.db.v2 import api as db_api
@@ -33,6 +32,7 @@
from mistral import messaging as mistral_messaging
from mistral.rpc import clients as rpc
from mistral.services import security
+from mistral.utils import safe_yaml
LOG = logging.getLogger(__name__)
@@ -83,8 +83,8 @@ def __init__(self):
config = cf.read()
try:
- definition_cfg = yaml.safe_load(config)
- except yaml.YAMLError as err:
+ definition_cfg = safe_yaml.load(config)
+ except safe_yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (
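The patch swaps the stock yaml.safe_load for an in-tree mistral.utils.safe_yaml helper. A minimal sketch of what such a wrapper could look like, assuming it only needs to expose load() and YAMLError on top of PyYAML's SafeLoader (the real module may add further restrictions):

# Hypothetical stand-in for mistral.utils.safe_yaml; illustrative only.
import yaml

# Re-export the exception type so callers can keep their except clauses.
YAMLError = yaml.YAMLError


def load(stream):
    # SafeLoader never constructs arbitrary Python objects, and funnelling all
    # YAML parsing through one helper keeps any extra limits in a single place.
    return yaml.load(stream, Loader=yaml.SafeLoader)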
|
# Copyright 2016 Catalyst IT Ltd
# Copyright 2017 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import json
import os
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import threadgroup
from oslo_utils import fnmatch
import six
import yaml
from mistral import context as auth_ctx
from mistral.db.v2 import api as db_api
from mistral.event_engine import base
from mistral import exceptions
from mistral import expressions
from mistral import messaging as mistral_messaging
from mistral.rpc import clients as rpc
from mistral.services import security
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEFAULT_PROPERTIES = {
'service': '<% $.publisher %>',
'project_id': '<% $.context.project_id %>',
'user_id': '<% $.context.user_id %>',
'timestamp': '<% $.timestamp %>'
}
class EventDefinition(object):
def __init__(self, definition_cfg):
self.cfg = definition_cfg
try:
self.event_types = self.cfg['event_types']
self.properties = self.cfg['properties']
except KeyError as err:
raise exceptions.MistralException(
"Required field %s not specified" % err.args[0]
)
if isinstance(self.event_types, six.string_types):
self.event_types = [self.event_types]
def match_type(self, event_type):
for t in self.event_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def convert(self, event):
return expressions.evaluate_recursively(self.properties, event)
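# Illustrative definitions-file entry (hypothetical; the shape is inferred from
# the required 'event_types' and 'properties' keys above):
#   - event_types: "compute.instance.create.end"
#     properties:
#       instance_id: "<% $.payload.instance_id %>"
#       project_id: "<% $.context.project_id %>"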
class NotificationsConverter(object):
def __init__(self):
config_file = CONF.event_engine.event_definitions_cfg_file
definition_cfg = []
if os.path.exists(config_file):
with open(config_file) as cf:
config = cf.read()
try:
definition_cfg = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (
"Invalid YAML syntax in Definitions file "
"%(file)s at line: %(line)s, column: %(column)s."
% dict(file=config_file,
line=mark.line + 1,
column=mark.column + 1)
)
else:
errmsg = (
"YAML error reading Definitions file %s" %
CONF.event_engine.event_definitions_cfg_file
)
LOG.error(errmsg)
raise exceptions.MistralError(
'Invalid event definition configuration file. %s' %
config_file
)
self.definitions = [EventDefinition(event_def)
for event_def in reversed(definition_cfg)]
def get_event_definition(self, event_type):
for d in self.definitions:
if d.match_type(event_type):
return d
return None
def convert(self, event_type, event):
edef = self.get_event_definition(event_type)
if edef is None:
LOG.debug('No event definition found for type: %s, use default '
'settings instead.', event_type)
return expressions.evaluate_recursively(DEFAULT_PROPERTIES, event)
return edef.convert(event)
class DefaultEventEngine(base.EventEngine):
"""Event engine server.
A separate service that is responsible for listening for event notifications
and triggering the workflows defined by end users.
"""
def __init__(self):
self.engine_client = rpc.get_engine_client()
self.event_queue = six.moves.queue.Queue()
self.handler_tg = threadgroup.ThreadGroup()
self.event_triggers_map = defaultdict(list)
self.exchange_topic_events_map = defaultdict(set)
self.exchange_topic_listener_map = {}
self.lock = threading.Lock()
LOG.debug('Loading notification definitions.')
self.notification_converter = NotificationsConverter()
self._start_handler()
self._start_listeners()
def _get_endpoint_cls(self, events):
"""Create a messaging endpoint class.
The endpoint implements a method named after each priority, and only
handles notifications that match the NotificationFilter rule set in the
endpoint's filter_rule attribute.
"""
# Handle each priority of notification messages.
event_priorities = ['audit', 'critical', 'debug', 'error', 'info']
attrs = dict.fromkeys(
event_priorities,
mistral_messaging.handle_event
)
attrs['event_types'] = events
endpoint_cls = type(
'MistralNotificationEndpoint',
(mistral_messaging.NotificationEndpoint,),
attrs,
)
return endpoint_cls
def _add_event_listener(self, exchange, topic, events):
"""Add or update event listener for specified exchange, topic.
Create a new event listener for the event trigger if no existing
listener relates to (exchange, topic).
Or, restart existing event listener with updated events.
"""
key = (exchange, topic)
if key in self.exchange_topic_listener_map:
listener = self.exchange_topic_listener_map[key]
listener.stop()
listener.wait()
endpoint = self._get_endpoint_cls(events)(self)
LOG.debug("Starting to listen to AMQP. exchange: %s, topic: %s",
exchange, topic)
listener = mistral_messaging.start_listener(
CONF,
exchange,
topic,
[endpoint]
)
self.exchange_topic_listener_map[key] = listener
def stop_all_listeners(self):
for listener in six.itervalues(self.exchange_topic_listener_map):
listener.stop()
listener.wait()
def _start_listeners(self):
triggers = db_api.get_event_triggers(insecure=True)
LOG.info('Found %s event triggers.', len(triggers))
for trigger in triggers:
exchange_topic = (trigger.exchange, trigger.topic)
self.exchange_topic_events_map[exchange_topic].add(trigger.event)
trigger_info = trigger.to_dict()
trigger_info['workflow_namespace'] = trigger.workflow.namespace
self.event_triggers_map[trigger.event].append(trigger_info)
for (ex_t, events) in self.exchange_topic_events_map.items():
exchange, topic = ex_t
self._add_event_listener(exchange, topic, events)
def _start_workflow(self, triggers, event_params):
"""Start workflows defined in event triggers."""
for t in triggers:
LOG.info('Start to process event trigger: %s', t['id'])
workflow_params = t.get('workflow_params', {})
workflow_params.update({'event_params': event_params})
# Setup context before schedule triggers.
ctx = security.create_context(t['trust_id'], t['project_id'])
auth_ctx.set_ctx(ctx)
description = {
"description": (
"Workflow execution created by event"
" trigger '(%s)'." % t['id']
),
"triggered_by": {
"type": "event_trigger",
"id": t['id'],
"name": t['name']
}
}
try:
self.engine_client.start_workflow(
t['workflow_id'],
t['workflow_namespace'],
None,
t['workflow_input'],
description=json.dumps(description),
**workflow_params
)
except Exception as e:
LOG.exception("Failed to process event trigger %s, "
"error: %s", t['id'], str(e))
finally:
auth_ctx.set_ctx(None)
def _process_event_queue(self, *args, **kwargs):
"""Process notification events.
This function is called in a thread.
"""
while True:
event = self.event_queue.get()
context = event.get('context')
event_type = event.get('event_type')
# NOTE(kong): Use lock here to protect event_triggers_map variable
# from being updated outside the thread.
with self.lock:
if event_type in self.event_triggers_map:
triggers = self.event_triggers_map[event_type]
# There may be multiple projects registered for the same event.
project_ids = [t['project_id'] for t in triggers]
any_public = any(
[t['scope'] == 'public' for t in triggers]
)
# Skip the event if it doesn't belong to any event trigger owner.
if (not any_public and CONF.pecan.auth_enable and
context.get('project_id', '') not in project_ids):
self.event_queue.task_done()
continue
# Need to choose what trigger(s) should be called exactly.
triggers_to_call = []
for t in triggers:
project_trigger = (
t['project_id'] == context.get('project_id')
)
public_trigger = t['scope'] == 'public'
if project_trigger or public_trigger:
triggers_to_call.append(t)
LOG.debug('Start to handle event: %s, %d trigger(s) '
'registered.', event_type, len(triggers))
event_params = self.notification_converter.convert(
event_type,
event
)
self._start_workflow(triggers_to_call, event_params)
self.event_queue.task_done()
def _start_handler(self):
"""Starts event queue handler in a thread group."""
LOG.info('Starting event notification task...')
self.handler_tg.add_thread(self._process_event_queue)
def process_notification_event(self, notification):
"""Callback function by event handler.
Just put notification into a queue.
"""
LOG.debug("Putting notification event to event queue.")
self.event_queue.put(notification)
def create_event_trigger(self, trigger, events):
"""An endpoint method for creating event trigger.
When creating an event trigger in the API layer, we need to create a new
listener or update an existing listener.
:param trigger: a dict containing event trigger information.
:param events: a list of events binding to the (exchange, topic) of
the event trigger.
"""
with self.lock:
ids = [t['id'] for t in self.event_triggers_map[trigger['event']]]
if trigger['id'] not in ids:
self.event_triggers_map[trigger['event']].append(trigger)
self._add_event_listener(trigger['exchange'], trigger['topic'], events)
def update_event_trigger(self, trigger):
"""An endpoint method for updating event trigger.
Because only workflow-related information is allowed to be updated, we
only need to update event_triggers_map (in a synchronous way).
:param trigger: a dict containing event trigger information.
"""
assert trigger['event'] in self.event_triggers_map
with self.lock:
for t in self.event_triggers_map[trigger['event']]:
if trigger['id'] == t['id']:
t.update(trigger)
def delete_event_trigger(self, trigger, events):
"""An endpoint method for deleting event trigger.
If there is no event bound to (exchange, topic) after deletion, we
need to delete the related listener. Otherwise, we need to restart
that listener.
:param trigger: a dict containing event trigger information.
:param events: a list of events binding to the (exchange, topic) of
the event trigger.
"""
assert trigger['event'] in self.event_triggers_map
with self.lock:
for t in self.event_triggers_map[trigger['event']]:
if t['id'] == trigger['id']:
self.event_triggers_map[trigger['event']].remove(t)
break
if not self.event_triggers_map[trigger['event']]:
del self.event_triggers_map[trigger['event']]
if not events:
key = (trigger['exchange'], trigger['topic'])
listener = self.exchange_topic_listener_map[key]
listener.stop()
listener.wait()
del self.exchange_topic_listener_map[key]
LOG.info(
'Deleted listener for exchange: %s, topic: %s',
trigger['exchange'],
trigger['topic']
)
return
self._add_event_listener(trigger['exchange'], trigger['topic'], events)
|
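For orientation, the event engine above matches incoming notification types against the patterns declared in each event definition using fnmatch-style wildcards, and falls back to DEFAULT_PROPERTIES when nothing matches. A minimal standalone sketch of that matching rule (the pattern values here are illustrative, not taken from a real definitions file):
import fnmatch
# Illustrative wildcard patterns an event definition might declare; the
# real values come from the event_definitions_cfg_file parsed above.
event_types = ['compute.instance.*', 'volume.create.end']
def match_type(event_type):
    # Same rule as EventDefinition.match_type(): any matching pattern wins.
    return any(fnmatch.fnmatch(event_type, t) for t in event_types)
print(match_type('compute.instance.create.end'))  # True
print(match_type('image.upload'))                 # False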
GHSA-443j-6p7g-6v4w
|
mistral/lang/parser.py
|
@@ -15,7 +15,6 @@
import cachetools
import threading
-import yaml
from yaml import error
import six
@@ -27,6 +26,7 @@
from mistral.lang.v2 import tasks as tasks_v2
from mistral.lang.v2 import workbook as wb_v2
from mistral.lang.v2 import workflows as wf_v2
+from mistral.utils import safe_yaml
V2_0 = '2.0'
@@ -50,7 +50,7 @@ def parse_yaml(text):
"""
try:
- return yaml.safe_load(text) or {}
+ return safe_yaml.load(text) or {}
except error.YAMLError as e:
raise exc.DSLParsingException(
"Definition could not be parsed: %s\n" % e
|
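The patch above routes parsing through mistral.utils.safe_yaml, whose implementation is not part of this row. For context, the relevant PyYAML distinction is that the full loader can construct arbitrary Python objects from tags such as !!python/object/apply, while the safe loader only builds plain data types and rejects such tags. A small illustration of that behaviour (hedged sketch using stock PyYAML, not the Mistral helper):
import yaml
doc = """
greeting: hello
count: 3
"""
# The safe loader only produces plain Python data (dicts, lists, scalars).
print(yaml.safe_load(doc))  # {'greeting': 'hello', 'count': 3}
# A document carrying a python/object tag is rejected by the safe loader
# instead of being turned into a live Python object.
evil = "!!python/object/apply:os.system ['echo pwned']"
try:
    yaml.safe_load(evil)
except yaml.YAMLError as exc:
    print("rejected:", type(exc).__name__)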
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cachetools
import threading
import yaml
from yaml import error
import six
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.lang import base
from mistral.lang.v2 import actions as actions_v2
from mistral.lang.v2 import tasks as tasks_v2
from mistral.lang.v2 import workbook as wb_v2
from mistral.lang.v2 import workflows as wf_v2
V2_0 = '2.0'
ALL_VERSIONS = [V2_0]
# {workflow execution id => workflow specification}.
_WF_EX_CACHE = cachetools.LRUCache(maxsize=100)
_WF_EX_CACHE_LOCK = threading.RLock()
# {(workflow def id, workflow def updated at) => workflow specification}.
_WF_DEF_CACHE = cachetools.LRUCache(maxsize=100)
_WF_DEF_CACHE_LOCK = threading.RLock()
def parse_yaml(text):
"""Loads a text in YAML format as dictionary object.
:param text: YAML text.
:return: Parsed YAML document as dictionary.
"""
try:
return yaml.safe_load(text) or {}
except error.YAMLError as e:
raise exc.DSLParsingException(
"Definition could not be parsed: %s\n" % e
)
def _get_spec_version(spec_dict):
# If version is not specified it will be '2.0' by default.
ver = V2_0
if 'version' in spec_dict:
ver = spec_dict['version']
def _raise(ver):
raise exc.DSLParsingException('Unsupported DSL version: %s' % ver)
try:
str_ver = str(float(ver))
except (ValueError, TypeError):
_raise(ver)
if not ver or str_ver not in ALL_VERSIONS:
_raise(ver)
return ver
# Factory methods to get specifications either from raw YAML formatted text or
# from dictionaries parsed from YAML formatted text.
def get_workbook_spec(spec_dict, validate):
if _get_spec_version(spec_dict) == V2_0:
return base.instantiate_spec(
wb_v2.WorkbookSpec, spec_dict, validate
)
return None
def get_workbook_spec_from_yaml(text, validate=True):
return get_workbook_spec(parse_yaml(text), validate)
def get_action_spec(spec_dict):
if _get_spec_version(spec_dict) == V2_0:
return base.instantiate_spec(actions_v2.ActionSpec, spec_dict)
return None
def get_action_spec_from_yaml(text, action_name):
spec_dict = parse_yaml(text)
spec_dict['name'] = action_name
return get_action_spec(spec_dict)
def get_action_list_spec(spec_dict, validate):
return base.instantiate_spec(
actions_v2.ActionListSpec, spec_dict, validate
)
def get_action_list_spec_from_yaml(text, validate=True):
return get_action_list_spec(parse_yaml(text), validate=validate)
def get_workflow_spec(spec_dict):
"""Get workflow specification object from dictionary.
NOTE: For large workflows this method can take a long time (seconds).
For this reason, method 'get_workflow_spec_by_definition_id' or
'get_workflow_spec_by_execution_id' should be used whenever possible
because they cache specification objects.
:param spec_dict: Raw specification dictionary.
"""
if _get_spec_version(spec_dict) == V2_0:
return base.instantiate_spec(wf_v2.WorkflowSpec, spec_dict)
return None
def get_workflow_list_spec(spec_dict, validate):
return base.instantiate_spec(
wf_v2.WorkflowListSpec,
spec_dict,
validate
)
def get_workflow_spec_from_yaml(text):
return get_workflow_spec(parse_yaml(text))
def get_workflow_list_spec_from_yaml(text, validate=True):
return get_workflow_list_spec(parse_yaml(text), validate)
def get_task_spec(spec_dict):
if _get_spec_version(spec_dict) == V2_0:
return base.instantiate_spec(tasks_v2.TaskSpec, spec_dict)
return None
def get_workflow_definition(wb_def, wf_name):
wf_name = wf_name + ":"
return _parse_def_from_wb(wb_def, "workflows:", wf_name)
def get_action_definition(wb_def, action_name):
action_name += ":"
return _parse_def_from_wb(wb_def, "actions:", action_name)
def _parse_def_from_wb(wb_def, section_name, item_name):
io = six.StringIO(wb_def[wb_def.index(section_name):])
io.readline()
definition = []
ident = 0
# Get the indentation of the action/workflow name tag.
for line in io:
if item_name == line.strip():
ident = line.index(item_name)
definition.append(line.lstrip())
break
# Add strings to list unless same/less indentation is found.
for line in io:
new_line = line.strip()
if not new_line:
definition.append(line)
elif new_line.startswith("#"):
new_line = line if ident > line.index("#") else line[ident:]
definition.append(new_line)
else:
temp = line.index(line.lstrip())
if ident < temp:
definition.append(line[ident:])
else:
break
io.close()
return ''.join(definition).rstrip() + '\n'
# Methods for obtaining specifications in a more efficient way using
# caching techniques.
@cachetools.cached(_WF_EX_CACHE, lock=_WF_EX_CACHE_LOCK)
def get_workflow_spec_by_execution_id(wf_ex_id):
"""Gets workflow specification by workflow execution id.
The idea is that when a workflow execution is running we
must be getting the same workflow specification even if
the workflow definition has already changed. However, note
that this is true only if the current engine instance didn't
restart during the entire workflow execution run.
:param wf_ex_id: Workflow execution id.
:return: Workflow specification.
"""
if not wf_ex_id:
return None
wf_ex = db_api.get_workflow_execution(wf_ex_id)
return get_workflow_spec(wf_ex.spec)
@cachetools.cached(_WF_DEF_CACHE, lock=_WF_DEF_CACHE_LOCK)
def get_workflow_spec_by_definition_id(wf_def_id, wf_def_updated_at):
"""Gets specification by workflow definition id and its 'updated_at'.
The idea of this method is to return a cached specification for the
given workflow id and workflow definition 'updated_at'. As long as the
given workflow definition remains the same in the DB, users of this method
will get a cached value. Once the workflow definition has changed, clients
will provide a different 'updated_at' value, so this method will be called
again and the spec will be updated for the new combination of parameters.
Old cached values will be evicted by the LRU algorithm if the cache runs
out of space.
:param wf_def_id: Workflow definition id.
:param wf_def_updated_at: Workflow definition 'updated_at' value. It
serves only as part of cache key and is not explicitly used in the
method.
:return: Workflow specification.
"""
if not wf_def_id:
return None
wf_def = db_api.get_workflow_definition(wf_def_id)
return get_workflow_spec(wf_def.spec)
def cache_workflow_spec_by_execution_id(wf_ex_id, wf_spec):
with _WF_EX_CACHE_LOCK:
_WF_EX_CACHE[cachetools.keys.hashkey(wf_ex_id)] = wf_spec
def get_wf_execution_spec_cache_size():
return len(_WF_EX_CACHE)
def get_wf_definition_spec_cache_size():
return len(_WF_DEF_CACHE)
def clear_caches():
"""Clears all specification caches."""
with _WF_EX_CACHE_LOCK:
_WF_EX_CACHE.clear()
with _WF_DEF_CACHE_LOCK:
_WF_DEF_CACHE.clear()
|
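The two cached helpers at the end of the file memoize parsed specifications with cachetools LRU caches guarded by RLocks; for definitions the cache key is the (wf_def_id, wf_def_updated_at) pair, so a changed definition naturally misses the cache. A small standalone sketch of the same pattern (names and values here are illustrative):
import threading
import cachetools
_CACHE = cachetools.LRUCache(maxsize=100)
_CACHE_LOCK = threading.RLock()
@cachetools.cached(_CACHE, lock=_CACHE_LOCK)
def get_spec(def_id, updated_at):
    # Stand-in for the expensive parse; the body only runs on a cache miss.
    print('parsing', def_id)
    return {'id': def_id, 'parsed_at': updated_at}
get_spec('wf-1', '2020-01-01')  # parses
get_spec('wf-1', '2020-01-01')  # served from the cache
get_spec('wf-1', '2020-02-01')  # new updated_at -> miss, parses again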
GHSA-443j-6p7g-6v4w
|
mistral/services/workflows.py
|
@@ -12,12 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import yaml
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral import services
+from mistral.utils import safe_yaml
from mistral.workflow import states
from mistral_lib import utils
from oslo_log import log as logging
@@ -95,7 +95,7 @@ def _append_all_workflows(definition, is_system, scope, namespace,
wf_list_spec, db_wfs):
wfs = wf_list_spec.get_workflows()
- wfs_yaml = yaml.load(definition) if len(wfs) != 1 else None
+ wfs_yaml = safe_yaml.load(definition) if len(wfs) != 1 else None
for wf_spec in wfs:
if len(wfs) != 1:
@@ -135,7 +135,7 @@ def update_workflows(definition, scope='private', identifier=None,
db_wfs = []
- wfs_yaml = yaml.load(definition) if len(wfs) != 1 else None
+ wfs_yaml = safe_yaml.load(definition) if len(wfs) != 1 else None
with db_api.transaction():
for wf_spec in wfs:
@@ -205,7 +205,7 @@ def _update_workflow(wf_spec, definition, scope, identifier=None,
def _cut_wf_definition_from_all(wfs_yaml, wf_name):
- return yaml.dump({
+ return safe_yaml.dump({
'version': wfs_yaml['version'],
wf_name: wfs_yaml[wf_name]
})
|
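Unlike parser.py, this module called yaml.load() with no explicit Loader, which in PyYAML selects the full loader and can instantiate arbitrary Python objects from a crafted workflow definition; the patch replaces both the load and the dump in _cut_wf_definition_from_all() with mistral.utils.safe_yaml (implementation not shown here). A hedged sketch of the equivalent hardening with stock PyYAML, using an illustrative two-workflow definition:
import yaml
definition = """
version: '2.0'
wf1:
  tasks:
    t1:
      action: std.echo output="hi"
wf2:
  tasks:
    t1:
      action: std.noop
"""
# Parse the untrusted definition with the safe loader only.
wfs_yaml = yaml.safe_load(definition)
# Re-serialize a single workflow plus the version key, as
# _cut_wf_definition_from_all() does, using the safe dumper.
print(yaml.safe_dump(
    {'version': wfs_yaml['version'], 'wf1': wfs_yaml['wf1']},
    default_flow_style=False))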
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral import services
from mistral.workflow import states
from mistral_lib import utils
from oslo_log import log as logging
STD_WF_PATH = 'resources/workflows'
LOG = logging.getLogger(__name__)
def register_standard_workflows(run_in_tx=True):
LOG.debug("Registering standard workflows...")
workflow_paths = utils.get_file_list(STD_WF_PATH)
for wf_path in workflow_paths:
workflow_definition = open(wf_path).read()
create_workflows(
workflow_definition,
scope='public',
is_system=True,
run_in_tx=run_in_tx,
namespace=''
)
def _clear_system_workflow_db():
db_api.delete_workflow_definitions(is_system=True)
def sync_db():
LOG.debug("Syncing db...")
with db_api.transaction():
_clear_system_workflow_db()
register_standard_workflows(run_in_tx=False)
def create_workflows(definition, scope='private', is_system=False,
run_in_tx=True, namespace='', validate=True):
LOG.debug("Creating workflows...")
wf_list_spec = spec_parser.get_workflow_list_spec_from_yaml(
definition,
validate=services.is_validation_enabled(validate)
)
db_wfs = []
if run_in_tx:
with db_api.transaction():
_append_all_workflows(
definition,
is_system,
scope,
namespace,
wf_list_spec,
db_wfs
)
else:
_append_all_workflows(
definition,
is_system,
scope,
namespace,
wf_list_spec,
db_wfs
)
return db_wfs
def _append_all_workflows(definition, is_system, scope, namespace,
wf_list_spec, db_wfs):
wfs = wf_list_spec.get_workflows()
wfs_yaml = yaml.load(definition) if len(wfs) != 1 else None
for wf_spec in wfs:
if len(wfs) != 1:
definition = _cut_wf_definition_from_all(
wfs_yaml,
wf_spec.get_name()
)
db_wfs.append(
_create_workflow(
wf_spec,
definition,
scope,
namespace,
is_system
)
)
def update_workflows(definition, scope='private', identifier=None,
namespace='', validate=True):
LOG.debug("Updating workflows...")
wf_list_spec = spec_parser.get_workflow_list_spec_from_yaml(
definition,
validate=services.is_validation_enabled(validate)
)
wfs = wf_list_spec.get_workflows()
if identifier and len(wfs) > 1:
raise exc.InputException(
"More than one workflows are not supported for "
"update with identifier. [identifier: %s]" %
identifier
)
db_wfs = []
wfs_yaml = yaml.load(definition) if len(wfs) != 1 else None
with db_api.transaction():
for wf_spec in wfs:
if len(wfs) != 1:
definition = _cut_wf_definition_from_all(
wfs_yaml,
wf_spec.get_name()
)
db_wfs.append(
_update_workflow(
wf_spec,
definition,
scope,
namespace=namespace,
identifier=identifier
)
)
return db_wfs
def update_workflow_execution_env(wf_ex, env):
if not env:
return wf_ex
if wf_ex.state not in [states.IDLE, states.PAUSED, states.ERROR]:
raise exc.NotAllowedException(
'Updating env to workflow execution is only permitted if '
'it is in IDLE, PAUSED, or ERROR state.'
)
wf_ex.params['env'] = utils.merge_dicts(wf_ex.params['env'], env)
return wf_ex
def _get_workflow_values(wf_spec, definition, scope, namespace=None,
is_system=False):
values = {
'name': wf_spec.get_name(),
'tags': wf_spec.get_tags(),
'definition': definition,
'spec': wf_spec.to_dict(),
'scope': scope,
'namespace': namespace,
'is_system': is_system
}
return values
def _create_workflow(wf_spec, definition, scope, namespace, is_system):
return db_api.create_workflow_definition(
_get_workflow_values(wf_spec, definition, scope, namespace, is_system)
)
def _update_workflow(wf_spec, definition, scope, identifier=None,
namespace=''):
values = _get_workflow_values(wf_spec, definition, scope, namespace)
return db_api.update_workflow_definition(
identifier if identifier else values['name'],
values
)
def _cut_wf_definition_from_all(wfs_yaml, wf_name):
return yaml.dump({
'version': wfs_yaml['version'],
wf_name: wfs_yaml[wf_name]
})
|
GHSA-443j-6p7g-6v4w
|
mistral/tests/unit/lang/v2/base.py
|
@@ -14,11 +14,11 @@
import copy
-import yaml
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.tests.unit import base
+from mistral.utils import safe_yaml
from mistral_lib import utils
@@ -75,9 +75,10 @@ def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file)
if changes:
- dsl_dict = yaml.safe_load(dsl_yaml)
+ dsl_dict = safe_yaml.safe_load(dsl_yaml)
utils.merge_dicts(dsl_dict, changes)
- dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)
+ dsl_yaml = safe_yaml.safe_dump(dsl_dict,
+ default_flow_style=False)
else:
dsl_dict = copy.deepcopy(self._dsl_blank)
@@ -87,7 +88,7 @@ def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
if changes:
utils.merge_dicts(dsl_dict, changes)
- dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)
+ dsl_yaml = safe_yaml.safe_dump(dsl_dict, default_flow_style=False)
if not expect_error:
return self._spec_parser(dsl_yaml)
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import yaml
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.tests.unit import base
from mistral_lib import utils
class WorkflowSpecValidationTestCase(base.BaseTest):
def __init__(self, *args, **kwargs):
super(WorkflowSpecValidationTestCase, self).__init__(*args, **kwargs)
# The relative resource path is ./mistral/tests/resources/workbook/v2.
self._resource_path = 'workbook/v2'
self._spec_parser = spec_parser.get_workflow_list_spec_from_yaml
self._dsl_blank = {
'version': '2.0',
'test': {
'type': 'direct'
}
}
self._dsl_tasks = {
'get': {
'action': 'std.http',
'input': {
'url': 'https://www.openstack.org'
}
},
'echo': {
'action': 'std.echo',
'input': {
'output': 'This is a test.'
}
},
'email': {
'action': 'std.email',
'input': {
'from_addr': '[email protected]',
'to_addrs': ['[email protected]'],
'subject': 'Test',
'body': 'This is a test.',
'smtp_server': 'localhost',
'smtp_password': 'password'
}
}
}
def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
changes=None, expect_error=False):
if dsl_file and add_tasks:
raise Exception('The add_tasks option is not a valid '
'combination with the dsl_file option.')
if dsl_file:
dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file)
if changes:
dsl_dict = yaml.safe_load(dsl_yaml)
utils.merge_dicts(dsl_dict, changes)
dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)
else:
dsl_dict = copy.deepcopy(self._dsl_blank)
if add_tasks:
dsl_dict['test']['tasks'] = copy.deepcopy(self._dsl_tasks)
if changes:
utils.merge_dicts(dsl_dict, changes)
dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)
if not expect_error:
return self._spec_parser(dsl_yaml)
else:
return self.assertRaises(
exc.DSLParsingException,
self._spec_parser,
dsl_yaml
)
class WorkbookSpecValidationTestCase(WorkflowSpecValidationTestCase):
def __init__(self, *args, **kwargs):
super(WorkbookSpecValidationTestCase, self).__init__(*args, **kwargs)
self._spec_parser = spec_parser.get_workbook_spec_from_yaml
self._dsl_blank = {
'version': '2.0',
'name': 'test_wb'
}
def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
changes=None, expect_error=False):
return super(WorkbookSpecValidationTestCase, self)._parse_dsl_spec(
dsl_file=dsl_file, add_tasks=False, changes=changes,
expect_error=expect_error)
|
GHSA-443j-6p7g-6v4w
|
django/contrib/sessions/backends/file.py
|
@@ -26,6 +26,8 @@ def __init__(self, session_key=None):
self.file_prefix = settings.SESSION_COOKIE_NAME
super(SessionStore, self).__init__(session_key)
+ VALID_KEY_CHARS = set("abcdef0123456789")
+
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
@@ -36,9 +38,9 @@ def _key_to_file(self, session_key=None):
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
- if os.path.sep in session_key:
+ if not set(session_key).issubset(self.VALID_KEY_CHARS):
raise SuspiciousOperation(
- "Invalid characters (directory components) in session key")
+ "Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
|
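The pre-patch check above only rejects os.path.sep, which is '/' on POSIX, so on Windows a key such as 'a\b\c' still reaches os.path.join(); the patched rule instead whitelists the lowercase hex alphabet, which is all an MD5-derived session key can contain (the doctests added in the next row exercise exactly this). A minimal standalone comparison of the two rules (sketch, not the Django code itself):
import os
VALID_KEY_CHARS = set("abcdef0123456789")
def old_check(session_key):
    # Pre-patch rule: only the platform's path separator is rejected.
    return os.path.sep not in session_key
def new_check(session_key):
    # Patched rule: every character must come from the hex whitelist.
    return set(session_key).issubset(VALID_KEY_CHARS)
for key in ("d41d8cd98f00b204e9800998ecf8427e", "a/b/c", "a\\b\\c"):
    print(repr(key), old_check(key), new_check(key))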
import errno
import os
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
class SessionStore(SessionBase):
"""
Implements a file based session store.
"""
def __init__(self, session_key=None):
self.storage_path = getattr(settings, "SESSION_FILE_PATH", None)
if not self.storage_path:
self.storage_path = tempfile.gettempdir()
# Make sure the storage path is valid.
if not os.path.isdir(self.storage_path):
raise ImproperlyConfigured(
"The session storage path %r doesn't exist. Please set your"
" SESSION_FILE_PATH setting to an existing directory in which"
" Django can store session data." % self.storage_path)
self.file_prefix = settings.SESSION_COOKIE_NAME
super(SessionStore, self).__init__(session_key)
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
"""
if session_key is None:
session_key = self.session_key
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if os.path.sep in session_key:
raise SuspiciousOperation(
"Invalid characters (directory components) in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
def load(self):
session_data = {}
try:
session_file = open(self._key_to_file(), "rb")
try:
file_data = session_file.read()
# Don't fail if there is no data in the session file.
# We may have opened the empty placeholder file.
if file_data:
try:
session_data = self.decode(file_data)
except (EOFError, SuspiciousOperation):
self.create()
finally:
session_file.close()
except IOError:
pass
return session_data
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
self._session_cache = {}
return
def save(self, must_create=False):
# Get the session data now, before we start messing
# with the file it is stored within.
session_data = self._get_session(no_load=must_create)
session_file_name = self._key_to_file()
try:
# Make sure the file exists. If it does not already exist, an
# empty placeholder file is created.
flags = os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0)
if must_create:
flags |= os.O_EXCL
fd = os.open(session_file_name, flags)
os.close(fd)
except OSError, e:
if must_create and e.errno == errno.EEXIST:
raise CreateError
raise
# Write the session file without interfering with other threads
# or processes. By writing to an atomically generated temporary
# file and then using the atomic os.rename() to make the complete
# file visible, we avoid having to lock the session file, while
# still maintaining its integrity.
#
# Note: Locking the session file was explored, but rejected in part
# because in order to be atomic and cross-platform, it required a
# long-lived lock file for each session, doubling the number of
# files in the session storage directory at any given time. This
# rename solution is cleaner and avoids any additional overhead
# when reading the session data, which is the more common case
# unless SESSION_SAVE_EVERY_REQUEST = True.
#
# See ticket #8616.
dir, prefix = os.path.split(session_file_name)
try:
output_file_fd, output_file_name = tempfile.mkstemp(dir=dir,
prefix=prefix + '_out_')
renamed = False
try:
try:
os.write(output_file_fd, self.encode(session_data))
finally:
os.close(output_file_fd)
os.rename(output_file_name, session_file_name)
renamed = True
finally:
if not renamed:
os.unlink(output_file_name)
except (OSError, IOError, EOFError):
pass
def exists(self, session_key):
if os.path.exists(self._key_to_file(session_key)):
return True
return False
def delete(self, session_key=None):
if session_key is None:
if self._session_key is None:
return
session_key = self._session_key
try:
os.unlink(self._key_to_file(session_key))
except OSError:
pass
def clean(self):
pass
|
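SessionStore.save() above avoids locking by writing to a mkstemp() file in the same directory and then renaming it over the real session file, as its long comment explains. A compact Python 3 sketch of that same write-temp-then-rename pattern (standalone, and assuming a POSIX filesystem where a rename within one directory is atomic):
import os
import tempfile
def atomic_write(path, data):
    directory, prefix = os.path.split(path)
    fd, tmp_name = tempfile.mkstemp(dir=directory, prefix=prefix + '_out_')
    renamed = False
    try:
        try:
            os.write(fd, data)
        finally:
            os.close(fd)
        # The rename makes the complete file visible in one step.
        os.rename(tmp_name, path)
        renamed = True
    finally:
        if not renamed:
            os.unlink(tmp_name)
atomic_write(os.path.join(tempfile.gettempdir(), 'session_demo'), b'payload')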
GHSA-7g9h-c88w-r7h2
|
django/contrib/sessions/tests.py
|
@@ -129,6 +129,17 @@
>>> file_session = FileSession(file_session.session_key)
>>> file_session.save()
+# Ensure we don't allow directory traversal
+>>> FileSession("a/b/c").load()
+Traceback (innermost last):
+ ...
+SuspiciousOperation: Invalid characters in session key
+
+>>> FileSession("a\\b\\c").load()
+Traceback (innermost last):
+ ...
+SuspiciousOperation: Invalid characters in session key
+
# Make sure the file backend checks for a good storage dir
>>> settings.SESSION_FILE_PATH = "/if/this/directory/exists/you/have/a/weird/computer"
>>> FileSession()
|
r"""
>>> from django.conf import settings
>>> from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
>>> from django.contrib.sessions.backends.cache import SessionStore as CacheSession
>>> from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
>>> from django.contrib.sessions.backends.file import SessionStore as FileSession
>>> from django.contrib.sessions.backends.base import SessionBase
>>> from django.contrib.sessions.models import Session
>>> db_session = DatabaseSession()
>>> db_session.modified
False
>>> db_session.get('cat')
>>> db_session['cat'] = "dog"
>>> db_session.modified
True
>>> db_session.pop('cat')
'dog'
>>> db_session.pop('some key', 'does not exist')
'does not exist'
>>> db_session.save()
>>> db_session.exists(db_session.session_key)
True
>>> db_session.delete(db_session.session_key)
>>> db_session.exists(db_session.session_key)
False
>>> db_session['foo'] = 'bar'
>>> db_session.save()
>>> db_session.exists(db_session.session_key)
True
>>> prev_key = db_session.session_key
>>> db_session.flush()
>>> db_session.exists(prev_key)
False
>>> db_session.session_key == prev_key
False
>>> db_session.modified, db_session.accessed
(True, True)
>>> db_session['a'], db_session['b'] = 'c', 'd'
>>> db_session.save()
>>> prev_key = db_session.session_key
>>> prev_data = db_session.items()
>>> db_session.cycle_key()
>>> db_session.session_key == prev_key
False
>>> db_session.items() == prev_data
True
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
>>> Session.objects.filter(pk=db_session.session_key).delete()
>>> db_session = DatabaseSession(db_session.session_key)
>>> db_session.save()
>>> DatabaseSession('1').get('cat')
#
# Cached DB session tests
#
>>> cdb_session = CacheDBSession()
>>> cdb_session.modified
False
>>> cdb_session['cat'] = "dog"
>>> cdb_session.modified
True
>>> cdb_session.pop('cat')
'dog'
>>> cdb_session.pop('some key', 'does not exist')
'does not exist'
>>> cdb_session.save()
>>> cdb_session.exists(cdb_session.session_key)
True
>>> cdb_session.delete(cdb_session.session_key)
>>> cdb_session.exists(cdb_session.session_key)
False
#
# File session tests.
#
# Do file session tests in an isolated directory, and kill it after we're done.
>>> original_session_file_path = settings.SESSION_FILE_PATH
>>> import tempfile
>>> temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
>>> file_session = FileSession()
>>> file_session.modified
False
>>> file_session['cat'] = "dog"
>>> file_session.modified
True
>>> file_session.pop('cat')
'dog'
>>> file_session.pop('some key', 'does not exist')
'does not exist'
>>> file_session.save()
>>> file_session.exists(file_session.session_key)
True
>>> file_session.delete(file_session.session_key)
>>> file_session.exists(file_session.session_key)
False
>>> FileSession('1').get('cat')
>>> file_session['foo'] = 'bar'
>>> file_session.save()
>>> file_session.exists(file_session.session_key)
True
>>> prev_key = file_session.session_key
>>> file_session.flush()
>>> file_session.exists(prev_key)
False
>>> file_session.session_key == prev_key
False
>>> file_session.modified, file_session.accessed
(True, True)
>>> file_session['a'], file_session['b'] = 'c', 'd'
>>> file_session.save()
>>> prev_key = file_session.session_key
>>> prev_data = file_session.items()
>>> file_session.cycle_key()
>>> file_session.session_key == prev_key
False
>>> file_session.items() == prev_data
True
>>> Session.objects.filter(pk=file_session.session_key).delete()
>>> file_session = FileSession(file_session.session_key)
>>> file_session.save()
# Make sure the file backend checks for a good storage dir
>>> settings.SESSION_FILE_PATH = "/if/this/directory/exists/you/have/a/weird/computer"
>>> FileSession()
Traceback (innermost last):
...
ImproperlyConfigured: The session storage path '/if/this/directory/exists/you/have/a/weird/computer' doesn't exist. Please set your SESSION_FILE_PATH setting to an existing directory in which Django can store session data.
# Clean up after the file tests
>>> settings.SESSION_FILE_PATH = original_session_file_path
>>> import shutil
>>> shutil.rmtree(temp_session_store)
#
# Cache-based tests
# NB: be careful to delete any sessions created; stale sessions fill up the
# /tmp and eventually overwhelm it after lots of runs (think buildbots)
#
>>> cache_session = CacheSession()
>>> cache_session.modified
False
>>> cache_session['cat'] = "dog"
>>> cache_session.modified
True
>>> cache_session.pop('cat')
'dog'
>>> cache_session.pop('some key', 'does not exist')
'does not exist'
>>> cache_session.save()
>>> cache_session.delete(cache_session.session_key)
>>> cache_session.exists(cache_session.session_key)
False
>>> cache_session['foo'] = 'bar'
>>> cache_session.save()
>>> cache_session.exists(cache_session.session_key)
True
>>> prev_key = cache_session.session_key
>>> cache_session.flush()
>>> cache_session.exists(prev_key)
False
>>> cache_session.session_key == prev_key
False
>>> cache_session.modified, cache_session.accessed
(True, True)
>>> cache_session['a'], cache_session['b'] = 'c', 'd'
>>> cache_session.save()
>>> prev_key = cache_session.session_key
>>> prev_data = cache_session.items()
>>> cache_session.cycle_key()
>>> cache_session.session_key == prev_key
False
>>> cache_session.items() == prev_data
True
>>> cache_session = CacheSession()
>>> cache_session.save()
>>> key = cache_session.session_key
>>> cache_session.exists(key)
True
>>> Session.objects.filter(pk=cache_session.session_key).delete()
>>> cache_session = CacheSession(cache_session.session_key)
>>> cache_session.save()
>>> cache_session.delete(cache_session.session_key)
>>> s = SessionBase()
>>> s._session['some key'] = 'exists' # Pre-populate the session with some data
>>> s.accessed = False # Reset to pretend this wasn't accessed previously
>>> s.accessed, s.modified
(False, False)
>>> s.pop('non existent key', 'does not exist')
'does not exist'
>>> s.accessed, s.modified
(True, False)
>>> s.setdefault('foo', 'bar')
'bar'
>>> s.setdefault('foo', 'baz')
'bar'
>>> s.accessed = False # Reset the accessed flag
>>> s.pop('some key')
'exists'
>>> s.accessed, s.modified
(True, True)
>>> s.pop('some key', 'does not exist')
'does not exist'
>>> s.get('update key', None)
# test .update()
>>> s.modified = s.accessed = False # Reset to pretend this wasn't accessed previously
>>> s.update({'update key':1})
>>> s.accessed, s.modified
(True, True)
>>> s.get('update key', None)
1
# test .has_key()
>>> s.modified = s.accessed = False # Reset to pretend this wasn't accessed previously
>>> s.has_key('update key')
True
>>> s.accessed, s.modified
(True, False)
# test .values()
>>> s = SessionBase()
>>> s.values()
[]
>>> s.accessed
True
>>> s['x'] = 1
>>> s.values()
[1]
# test .iterkeys()
>>> s.accessed = False
>>> i = s.iterkeys()
>>> hasattr(i,'__iter__')
True
>>> s.accessed
True
>>> list(i)
['x']
# test .itervalues()
>>> s.accessed = False
>>> i = s.itervalues()
>>> hasattr(i,'__iter__')
True
>>> s.accessed
True
>>> list(i)
[1]
# test .iteritems()
>>> s.accessed = False
>>> i = s.iteritems()
>>> hasattr(i,'__iter__')
True
>>> s.accessed
True
>>> list(i)
[('x', 1)]
# test .clear()
>>> s.modified = s.accessed = False
>>> s.items()
[('x', 1)]
>>> s.clear()
>>> s.items()
[]
>>> s.accessed, s.modified
(True, True)
#########################
# Custom session expiry #
#########################
>>> from django.conf import settings
>>> from datetime import datetime, timedelta
>>> td10 = timedelta(seconds=10)
# A normal session has a max age equal to settings
>>> s.get_expiry_age() == settings.SESSION_COOKIE_AGE
True
# So does a custom session with an idle expiration time of 0 (but it'll expire
# at browser close)
>>> s.set_expiry(0)
>>> s.get_expiry_age() == settings.SESSION_COOKIE_AGE
True
# Custom session idle expiration time
>>> s.set_expiry(10)
>>> delta = s.get_expiry_date() - datetime.now()
>>> delta.seconds in (9, 10)
True
>>> age = s.get_expiry_age()
>>> age in (9, 10)
True
# Custom session fixed expiry date (timedelta)
>>> s.set_expiry(td10)
>>> delta = s.get_expiry_date() - datetime.now()
>>> delta.seconds in (9, 10)
True
>>> age = s.get_expiry_age()
>>> age in (9, 10)
True
# Custom session fixed expiry date (fixed datetime)
>>> s.set_expiry(datetime.now() + td10)
>>> delta = s.get_expiry_date() - datetime.now()
>>> delta.seconds in (9, 10)
True
>>> age = s.get_expiry_age()
>>> age in (9, 10)
True
# Set back to default session age
>>> s.set_expiry(None)
>>> s.get_expiry_age() == settings.SESSION_COOKIE_AGE
True
# Allow to set back to default session age even if no alternate has been set
>>> s.set_expiry(None)
# We're changing the setting then reverting back to the original setting at the
# end of these tests.
>>> original_expire_at_browser_close = settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
>>> settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Custom session age
>>> s.set_expiry(10)
>>> s.get_expire_at_browser_close()
False
# Custom expire-at-browser-close
>>> s.set_expiry(0)
>>> s.get_expire_at_browser_close()
True
# Default session age
>>> s.set_expiry(None)
>>> s.get_expire_at_browser_close()
False
>>> settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Custom session age
>>> s.set_expiry(10)
>>> s.get_expire_at_browser_close()
False
# Custom expire-at-browser-close
>>> s.set_expiry(0)
>>> s.get_expire_at_browser_close()
True
# Default session age
>>> s.set_expiry(None)
>>> s.get_expire_at_browser_close()
True
>>> settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = original_expire_at_browser_close
"""
if __name__ == '__main__':
import doctest
doctest.testmod()
|
GHSA-7g9h-c88w-r7h2
|
tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
|
@@ -16,10 +16,12 @@
import numpy as np
+from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
@@ -460,6 +462,18 @@ def testDeserializeManyFailsInvalidProto(self):
self._testDeserializeFailsInvalidProtoHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
+ def testDeserializeInvalidVariant(self):
+ mu = gen_resource_variable_ops.mutex_v2()
+ mu_lock = gen_resource_variable_ops.mutex_lock(mutex=mu)
+
+ @def_function.function
+ def f():
+ return sparse_ops.deserialize_sparse(
+ serialized_sparse=mu_lock, dtype=dtypes.int32)
+
+ with self.assertRaisesRegex(ValueError, r"Shape must be at least rank 1"):
+ f()
+
if __name__ == "__main__":
test.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SerializeSparse."""
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SerializeSparseTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _testSerializeDeserializeHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
indices, values, shape = self.evaluate(sp_deserialized)
self.assertAllEqual(indices, sp_input[0])
self.assertAllEqual(values, sp_input[1])
self.assertAllEqual(shape, sp_input[2])
def testSerializeDeserialize(self):
self._testSerializeDeserializeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
def testVariantSerializeDeserialize(self):
self._testSerializeDeserializeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
serialized = array_ops.stack([serialized, serialized])
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input[0])
self.assertAllEqual(combined_values[:6], sp_input[1])
self.assertAllEqual(combined_values[6:], sp_input[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeBatch(self):
self._testSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testSerializeDeserializeManyBatch(self):
self._testSerializeDeserializeBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeBatch(self):
self._testSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeBatchInconsistentShapeHelper(
self, serialize_fn, deserialize_fn, out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeBatchInconsistentShape(self):
self._testSerializeDeserializeBatchInconsistentShapeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeBatchInconsistentShape(self):
self._testSerializeDeserializeBatchInconsistentShapeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeNestedBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
serialized = array_ops.stack([serialized, serialized])
serialized = array_ops.stack([serialized, serialized])
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
# minibatch 0
self.assertAllEqual(combined_indices[:6, :2], [[0, 0]] * 6)
self.assertAllEqual(combined_indices[:6, 2:], sp_input[0])
self.assertAllEqual(combined_values[:6], sp_input[1])
# minibatch 1
self.assertAllEqual(combined_indices[6:12, :2], [[0, 1]] * 6)
self.assertAllEqual(combined_indices[6:12, 2:], sp_input[0])
self.assertAllEqual(combined_values[6:12], sp_input[1])
# minibatch 2
self.assertAllEqual(combined_indices[12:18, :2], [[1, 0]] * 6)
self.assertAllEqual(combined_indices[12:18, 2:], sp_input[0])
self.assertAllEqual(combined_values[12:18], sp_input[1])
# minibatch 3
self.assertAllEqual(combined_indices[18:, :2], [[1, 1]] * 6)
self.assertAllEqual(combined_indices[18:, 2:], sp_input[0])
self.assertAllEqual(combined_values[18:], sp_input[1])
self.assertAllEqual(combined_shape, [2, 2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeNestedBatch(self):
self._testSerializeDeserializeNestedBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeNestedBatch(self):
self._testSerializeDeserializeNestedBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testFeedSerializeDeserializeBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized, {sp_input0: input0_val,
sp_input1: input1_val})
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testFeedSerializeDeserializeBatch(self):
self._testFeedSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testFeedSerializeDeserializeManyBatch(self):
self._testFeedSerializeDeserializeBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testFeedVariantSerializeDeserializeBatch(self):
self._testFeedSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeManyShapeHelper(self,
serialize_many_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
serialized = serialize_many_fn(sparse_tensor, out_type=out_type)
serialized_value = sess.run(
serialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(serialized_value.shape, (4, 3))
@test_util.run_deprecated_v1
def testSerializeManyShape(self):
self._testSerializeManyShapeHelper(sparse_ops.serialize_many_sparse)
def testVariantSerializeManyShape(self):
# NOTE: The following test is a no-op as it is currently not possible to
# convert the serialized variant value to a numpy value.
pass
def _testSerializeManyDeserializeBatchHelper(self,
serialize_many_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
serialized = serialize_many_fn(sparse_tensor, out_type=out_type)
deserialized = deserialize_fn(serialized, dtype=dtypes.string)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
self.assertAllEqual(deserialized_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testSerializeManyDeserializeBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testSerializeManyDeserializeManyBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeManyDeserializeBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeScalar(self):
with self.session(use_gpu=False) as sess:
indices_value = np.array([[]], dtype=np.int64)
values_value = np.array([37], dtype=np.int32)
shape_value = np.array([], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder()
serialized = sparse_ops.serialize_sparse(
sparse_tensor, out_type=dtypes.variant)
deserialized = sparse_ops.deserialize_sparse(
serialized, dtype=dtypes.int32)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
self.assertAllEqual(deserialized_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeScalarBatch(self):
with self.session(use_gpu=False) as sess:
indices_value = np.array([[]], dtype=np.int64)
values_value = np.array([37], dtype=np.int32)
shape_value = np.array([], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder()
serialized = sparse_ops.serialize_sparse(
sparse_tensor, out_type=dtypes.variant)
stacked = array_ops.stack([serialized, serialized])
deserialized = sparse_ops.deserialize_sparse(stacked, dtype=dtypes.int32)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices,
np.array([[0], [1]], dtype=np.int64))
self.assertAllEqual(deserialized_value.values,
np.array([37, 37], dtype=np.int32))
self.assertAllEqual(deserialized_value.dense_shape,
np.array([2], dtype=np.int64))
def _testDeserializeFailsWrongTypeHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int64)
with self.assertRaisesOpError(
r"Requested SparseTensor of type int64 but "
r"SparseTensor\[0\].values.dtype\(\) == int32"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
@test_util.run_deprecated_v1
def testDeserializeFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantDeserializeFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testDeserializeFailsInconsistentRankHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Inconsistent shape across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 2 but rank of SparseTensor\[1\] is: 3"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
@test_util.run_deprecated_v1
def testDeserializeFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantDeserializeFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testDeserializeFailsInvalidProtoHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = ["a", "b", "c"]
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(r"Could not parse serialized proto"):
sess.run(sp_deserialized, {sp_input0: input0_val})
@test_util.run_deprecated_v1
def testDeserializeFailsInvalidProto(self):
self._testDeserializeFailsInvalidProtoHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsInvalidProto(self):
self._testDeserializeFailsInvalidProtoHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
if __name__ == "__main__":
test.main()
|
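For orientation, the happy path these tests exercise is a plain serialize/deserialize round-trip of a SparseTensor; a compact standalone version using the same internal modules as the test file (sketch, assumes eager execution in a TF 2.x environment):
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import sparse_ops
sp = sparse_tensor_lib.SparseTensor(
    indices=np.array([[0, 0], [1, 3]], dtype=np.int64),
    values=np.array([10, 20], dtype=np.int32),
    dense_shape=np.array([2, 5], dtype=np.int64))
serialized = sparse_ops.serialize_sparse(sp, out_type=dtypes.variant)
roundtrip = sparse_ops.deserialize_sparse(serialized, dtype=dtypes.int32)
print(roundtrip.indices.numpy())      # [[0 0] [1 3]]
print(roundtrip.values.numpy())       # [10 20]
print(roundtrip.dense_shape.numpy())  # [2 5]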
GHSA-x3v8-c8qx-3j3r
|
django/utils/http.py
|
@@ -16,9 +16,20 @@
from django.utils.functional import keep_lazy_text
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
- urlparse,
)
+if six.PY2:
+ from urlparse import (
+ ParseResult, SplitResult, _splitnetloc, _splitparams, scheme_chars,
+ uses_params,
+ )
+ _coerce_args = None
+else:
+ from urllib.parse import (
+ ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams,
+ scheme_chars, uses_params,
+ )
+
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
@@ -298,12 +309,64 @@ def is_safe_url(url, host=None):
return _is_safe_url(url, host) and _is_safe_url(url.replace('\\', '/'), host)
+# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
+def _urlparse(url, scheme='', allow_fragments=True):
+ """Parse a URL into 6 components:
+ <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
+ Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
+ Note that we don't break the components up in smaller bits
+ (e.g. netloc is a single string) and we don't expand % escapes."""
+ if _coerce_args:
+ url, scheme, _coerce_result = _coerce_args(url, scheme)
+ splitresult = _urlsplit(url, scheme, allow_fragments)
+ scheme, netloc, url, query, fragment = splitresult
+ if scheme in uses_params and ';' in url:
+ url, params = _splitparams(url)
+ else:
+ params = ''
+ result = ParseResult(scheme, netloc, url, params, query, fragment)
+ return _coerce_result(result) if _coerce_args else result
+
+
+# Copied from urllib.parse.urlsplit() with
+# https://github.com/python/cpython/pull/661 applied.
+def _urlsplit(url, scheme='', allow_fragments=True):
+ """Parse a URL into 5 components:
+ <scheme>://<netloc>/<path>?<query>#<fragment>
+ Return a 5-tuple: (scheme, netloc, path, query, fragment).
+ Note that we don't break the components up in smaller bits
+ (e.g. netloc is a single string) and we don't expand % escapes."""
+ if _coerce_args:
+ url, scheme, _coerce_result = _coerce_args(url, scheme)
+ allow_fragments = bool(allow_fragments)
+ netloc = query = fragment = ''
+ i = url.find(':')
+ if i > 0:
+ for c in url[:i]:
+ if c not in scheme_chars:
+ break
+ else:
+ scheme, url = url[:i].lower(), url[i + 1:]
+
+ if url[:2] == '//':
+ netloc, url = _splitnetloc(url, 2)
+ if (('[' in netloc and ']' not in netloc) or
+ (']' in netloc and '[' not in netloc)):
+ raise ValueError("Invalid IPv6 URL")
+ if allow_fragments and '#' in url:
+ url, fragment = url.split('#', 1)
+ if '?' in url:
+ url, query = url.split('?', 1)
+ v = SplitResult(scheme, netloc, url, query, fragment)
+ return _coerce_result(v) if _coerce_args else v
+
+
def _is_safe_url(url, host):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
- url_info = urlparse(url)
+ url_info = _urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
|
from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.core.exceptions import TooManyFieldsSent
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import keep_lazy_text
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
urlparse,
)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
@keep_lazy_text
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
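# Illustrative sketch (not part of the original module): the two-digit-year
# rule above maps years 00-69 to 2000-2069 and 70-99 to 1970-1999, so the
# RFC850 form 'Sunday, 06-Nov-94 08:49:37 GMT' parses to the same instant as
# the RFC1123 form 'Sun, 06 Nov 1994 08:49:37 GMT'. The helper name below is
# hypothetical and only meant as a usage illustration.
def _parse_http_date_demo():
    expected = calendar.timegm(datetime.datetime(1994, 11, 6, 8, 49, 37).utctimetuple())
    assert parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') == expected
    assert parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT') == expected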
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
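# Illustrative sketch (not part of the original module): why the 13-digit cap
# in base36_to_int() is safe for 64-bit values. 36**13 (about 1.7e20) exceeds
# 2**64 - 1 (about 1.8e19), so no valid 64-bit integer needs more than 13
# base36 digits. The helper name below is hypothetical.
def _base36_roundtrip_demo():
    assert 36 ** 13 > 2 ** 64 - 1
    for n in (0, 35, 36, 2 ** 63 - 1):
        assert base36_to_int(int_to_base36(n)) == n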
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def unquote_etag(etag):
"""
Unquote an ETag string; i.e. revert quote_etag().
"""
return etag.strip('"').replace('\\"', '"').replace('\\\\', '\\') if etag else etag
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if url is not None:
url = url.strip()
if not url:
return False
if six.PY2:
try:
url = force_text(url)
except UnicodeDecodeError:
return False
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return _is_safe_url(url, host) and _is_safe_url(url.replace('\\', '/'), host)
def _is_safe_url(url, host):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
return ((not url_info.netloc or url_info.netloc == host) and
(not url_info.scheme or url_info.scheme in ['http', 'https']))
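# Illustrative sketch (not part of the original module): expected verdicts from
# is_safe_url() for host='testserver', mirroring the project's test cases.
# Host-relative paths and same-host URLs pass; URLs naming another host, using
# a non-http(s) scheme, or starting with a control character are rejected. The
# helper name below is hypothetical.
def _is_safe_url_demo():
    for url, expected in (
        ('/view/?param=http://example.com', True),
        ('https://testserver/', True),
        ('http://example.com', False),
        ('javascript:alert("XSS")', False),
        ('\x08//example.com', False),
    ):
        assert is_safe_url(url, host='testserver') == expected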
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split(str('='), 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
if six.PY3:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
else:
name = unquote(nv[0].replace(b'+', b' '))
value = unquote(nv[1].replace(b'+', b' '))
r.append((name, value))
return r
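# Illustrative sketch (not part of the original module): how fields_limit caps
# the work done on a query string. If the string contains more than
# fields_limit fields, TooManyFieldsSent is raised instead of parsing them all.
# The helper name below is hypothetical.
def _limited_parse_qsl_demo():
    assert limited_parse_qsl('a=1&b=2', fields_limit=3) == [('a', '1'), ('b', '2')]
    try:
        limited_parse_qsl('a=1&b=2&c=3&d=4', fields_limit=3)
    except TooManyFieldsSent:
        pass
    else:
        raise AssertionError('expected TooManyFieldsSent')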
|
GHSA-37hp-765x-j95x
|
tests/utils_tests/test_http.py
|
@@ -104,6 +104,8 @@ def test_is_safe_url(self):
r'http://testserver\me:[email protected]',
r'http://testserver\@example.com',
r'http:\\testserver\confirm\[email protected]',
+ 'http:999999999',
+ 'ftp:9999999999',
'\n',
)
for bad_url in bad_urls:
@@ -119,6 +121,7 @@ def test_is_safe_url(self):
'//testserver/',
'http://testserver/[email protected]',
'/url%20with%20spaces/',
+ 'path/http:2222222222',
)
for good_url in good_urls:
self.assertTrue(http.is_safe_url(good_url, host='testserver'), "%s should be allowed" % good_url)
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import sys
import unittest
from datetime import datetime
from django.utils import http, six
from django.utils.datastructures import MultiValueDict
class TestUtilsHttp(unittest.TestCase):
def test_urlencode(self):
# 2-tuples (the norm)
result = http.urlencode((('a', 1), ('b', 2), ('c', 3)))
self.assertEqual(result, 'a=1&b=2&c=3')
# A dictionary
result = http.urlencode({'a': 1, 'b': 2, 'c': 3})
acceptable_results = [
# Need to allow all of these as dictionaries have to be treated as
# unordered
'a=1&b=2&c=3',
'a=1&c=3&b=2',
'b=2&a=1&c=3',
'b=2&c=3&a=1',
'c=3&a=1&b=2',
'c=3&b=2&a=1'
]
self.assertIn(result, acceptable_results)
result = http.urlencode({'a': [1, 2]}, doseq=False)
self.assertEqual(result, 'a=%5B%271%27%2C+%272%27%5D')
result = http.urlencode({'a': [1, 2]}, doseq=True)
self.assertEqual(result, 'a=1&a=2')
result = http.urlencode({'a': []}, doseq=True)
self.assertEqual(result, '')
# A MultiValueDict
result = http.urlencode(MultiValueDict({
'name': ['Adrian', 'Simon'],
'position': ['Developer']
}), doseq=True)
acceptable_results = [
# MultiValueDicts are similarly unordered
'name=Adrian&name=Simon&position=Developer',
'position=Developer&name=Adrian&name=Simon'
]
self.assertIn(result, acceptable_results)
def test_base36(self):
# reciprocity works
for n in [0, 1, 1000, 1000000]:
self.assertEqual(n, http.base36_to_int(http.int_to_base36(n)))
if six.PY2:
self.assertEqual(sys.maxint, http.base36_to_int(http.int_to_base36(sys.maxint)))
# bad input
with self.assertRaises(ValueError):
http.int_to_base36(-1)
if six.PY2:
with self.assertRaises(ValueError):
http.int_to_base36(sys.maxint + 1)
for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
http.int_to_base36(n)
for n in ['#', ' ']:
with self.assertRaises(ValueError):
http.base36_to_int(n)
for n in [123, {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
http.base36_to_int(n)
# more explicit output testing
for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]:
self.assertEqual(http.int_to_base36(n), b36)
self.assertEqual(http.base36_to_int(b36), n)
def test_is_safe_url(self):
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
r'\\example.com',
r'\\\example.com',
r'/\\/example.com',
r'\\\example.com',
r'\\example.com',
r'\\//example.com',
r'/\/example.com',
r'\/example.com',
r'/\example.com',
'http:///example.com',
'http:/\//example.com',
'http:\/example.com',
'http:/\example.com',
'javascript:alert("XSS")',
'\njavascript:alert(x)',
'\x08//example.com',
r'http://otherserver\@example.com',
r'http:\\testserver\@example.com',
r'http://testserver\me:[email protected]',
r'http://testserver\@example.com',
r'http:\\testserver\confirm\[email protected]',
'\n',
)
for bad_url in bad_urls:
self.assertFalse(http.is_safe_url(bad_url, host='testserver'), "%s should be blocked" % bad_url)
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'http://testserver/[email protected]',
'/url%20with%20spaces/',
)
for good_url in good_urls:
self.assertTrue(http.is_safe_url(good_url, host='testserver'), "%s should be allowed" % good_url)
if six.PY2:
# Check binary URLs, regression tests for #26308
self.assertTrue(
http.is_safe_url(b'https://testserver/', host='testserver'),
"binary URLs should be allowed on Python 2"
)
self.assertFalse(http.is_safe_url(b'\x08//example.com', host='testserver'))
self.assertTrue(http.is_safe_url('àview/'.encode('utf-8'), host='testserver'))
self.assertFalse(http.is_safe_url('àview'.encode('latin-1'), host='testserver'))
# Valid basic auth credentials are allowed.
self.assertTrue(http.is_safe_url(r'http://user:pass@testserver/', host='user:pass@testserver'))
# A path without host is allowed.
self.assertTrue(http.is_safe_url('/confirm/[email protected]'))
# Basic auth without host is not allowed.
self.assertFalse(http.is_safe_url(r'http://testserver\@example.com'))
def test_urlsafe_base64_roundtrip(self):
bytestring = b'foo'
encoded = http.urlsafe_base64_encode(bytestring)
decoded = http.urlsafe_base64_decode(encoded)
self.assertEqual(bytestring, decoded)
def test_urlquote(self):
self.assertEqual(http.urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans')
self.assertEqual(http.urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans')
self.assertEqual(http.urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(http.urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(http.urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans')
self.assertEqual(http.urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans')
self.assertEqual(http.urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(http.urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
def test_is_same_domain_good(self):
for pair in (
('example.com', 'example.com'),
('example.com', '.example.com'),
('foo.example.com', '.example.com'),
('example.com:8888', 'example.com:8888'),
('example.com:8888', '.example.com:8888'),
('foo.example.com:8888', '.example.com:8888'),
):
self.assertTrue(http.is_same_domain(*pair))
def test_is_same_domain_bad(self):
for pair in (
('example2.com', 'example.com'),
('foo.example.com', 'example.com'),
('example.com:9999', 'example.com:8888'),
):
self.assertFalse(http.is_same_domain(*pair))
class ETagProcessingTests(unittest.TestCase):
def test_parsing(self):
etags = http.parse_etags(r'"", "etag", "e\"t\"ag", "e\\tag", W/"weak"')
self.assertEqual(etags, ['', 'etag', 'e"t"ag', r'e\tag', 'weak'])
def test_quoting(self):
original_etag = r'e\t"ag'
quoted_etag = http.quote_etag(original_etag)
self.assertEqual(quoted_etag, r'"e\\t\"ag"')
self.assertEqual(http.unquote_etag(quoted_etag), original_etag)
class HttpDateProcessingTests(unittest.TestCase):
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http.http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT')
def test_cookie_date(self):
t = 1167616461.0
self.assertEqual(http.cookie_date(t), 'Mon, 01-Jan-2007 01:54:21 GMT')
def test_parsing_rfc1123(self):
parsed = http.parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_rfc850(self):
parsed = http.parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_asctime(self):
parsed = http.parse_http_date('Sun Nov 6 08:49:37 1994')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
|
GHSA-37hp-765x-j95x
|
python/paddle/tensor/manipulation.py
|
@@ -543,6 +543,8 @@ def unstack(x, axis=0, num=None):
raise ValueError(
'`axis` must be in the range [-{0}, {0})'.format(x.ndim)
)
+ if num is not None and (num < 0 or num > x.shape[axis]):
+ raise ValueError(f'`num` must be in the range [0, {x.shape[axis]})')
if in_dynamic_mode():
if num is None:
num = x.shape[axis]
@@ -4372,7 +4374,6 @@ def repeat_interleave(x, repeats, axis=None, name=None):
if axis is None:
x = paddle.flatten(x)
axis = 0
-
if in_dynamic_mode():
if isinstance(repeats, Variable):
return _C_ops.repeat_interleave_with_tensor_index(x, repeats, axis)
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define functions to manipulate a tensor
import numpy as np
import paddle
from paddle import _C_ops
from paddle.tensor import fill_constant
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
from ..fluid.data_feeder import (
check_dtype,
check_type,
check_variable_and_dtype,
convert_dtype,
)
from ..fluid.framework import Variable
from ..framework import (
LayerHelper,
convert_np_dtype_to_dtype_,
core,
dygraph_only,
in_dynamic_mode,
)
from .creation import _complex_to_real_dtype, _real_to_complex_dtype, zeros
__all__ = []
def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
r"""
This function concatenates or stacks all tensors in the input LoDTensorArray
along the axis mentioned and returns that as the output.
For Example:
.. code-block:: text
Case 1:
Given:
input.data = {[[0.6, 0.1, 0.3],
[0.5, 0.3, 0.2]],
[[1.3],
[1.8]],
[[2.3, 2.1],
[2.5, 2.4]]}
axis = 1, use_stack = False
Then:
output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
[0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]
output_index.data = [3, 1, 2]
Case 2:
Given:
input.data = {[[0.6, 0.1],
[0.5, 0.3]],
[[0.3, 1.3],
[0.2, 1.8]],
[[2.3, 2.1],
[2.5, 2.4]]}
axis = 1, use_stack = True
Then:
output.data = [[[0.6, 0.1]
[0.3, 1.3]
[2.3, 2.1],
[[0.5, 0.3]
[0.2, 1.8]
[2.5, 2.4]]]
output_index.data = [2, 2, 2]
Args:
input(TensorArray): A TensorArray variable.
axis(int): The axis along which the tensors in attr::`input` will be
concatenated or stacked.
use_stack(bool): Act as concat_op or stack_op. For stack mode, all
tensors in the tensor array must have the same shape.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Tensor: The concatenated or stacked tensor variable.
Tensor: A 1-D tensor variable with int32 data type. The data in this \
tensor contains all input including tensors' sizes along the axis.
Examples:
.. code-block:: python
import numpy
import paddle
x0 = paddle.assign(numpy.random.rand(2, 2).astype("float32"))
x1 = paddle.assign(numpy.random.rand(2, 2).astype("float32"))
i = paddle.full(shape=[1], dtype="int64", fill_value=0)
array = paddle.tensor.array.create_array(dtype='float32')
paddle.tensor.array.array_write(x0, i, array)
paddle.tensor.array.array_write(x1, i + 1, array)
output, output_index = paddle.tensor.manipulation.tensor_array_to_tensor(input=array)
"""
if in_dynamic_mode():
assert isinstance(
input, list
), "The 'input' in tensor_array_to_tensor must be list"
from paddle import concat, stack
op = stack if use_stack else concat
res = op(input, axis=axis)
sizes = paddle.to_tensor(np.array([int(x.shape[axis]) for x in input]))
return res, sizes
else:
check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
if isinstance(input, list):
for i, input_x in enumerate(input):
check_type(
input_x,
'input[' + str(i) + ']',
Variable,
'tensor_array_to_tensor',
)
helper = LayerHelper('tensor_array_to_tensor', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': input},
outputs={'Out': [out], 'OutIndex': [out_index]},
attrs={'axis': axis, 'use_stack': use_stack},
)
return out, out_index
def cast(x, dtype):
"""
Take in the Tensor :attr:`x` with :attr:`x.dtype` and cast it
to the output with :attr:`dtype`. It's meaningless if the output dtype
equals the input dtype, but it's fine if you do so.
Args:
x (Tensor): An input N-D Tensor with data type bool, float16,
float32, float64, int32, int64, uint8.
dtype (np.dtype|str): Data type of the output:
bool, float16, float32, float64, int8, int32, int64, uint8.
Returns:
Tensor, A Tensor with the same shape as input's.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 4], 'float64')
y = paddle.cast(x, 'uint8')
"""
if in_dynamic_mode():
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return _C_ops.cast(x, dtype)
else:
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'float32',
'float64',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'cast',
)
check_dtype(
dtype,
'dtype',
[
'bool',
'float16',
'float32',
'float64',
'int8',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'cast',
)
helper = LayerHelper('cast', **locals())
out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=x.stop_gradient
)
helper.append_op(
type='cast',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
)
return out
def slice(input, axes, starts, ends):
"""
This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed to
``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
axis :math:`i-1` (here 0 is the initial position).
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` and ``ends``.
Following examples will explain how slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000] # -1 denotes the reverse 0th position of dimension 0.
Then:
result = [ [2, 3, 4], ] # result = data[0:1, 1:4]
Args:
input (Tensor): A ``Tensor`` . The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to .
starts (list|tuple|Tensor): The data type is ``int32`` . If ``starts`` is a list or tuple, each element of
it should be an integer or a 0-D int Tensor with shape []. If ``starts`` is a Tensor, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Tensor): The data type is ``int32`` . If ``ends`` is a list or tuple, each element of
it should be an integer or a 0-D int Tensor with shape []. If ``ends`` is a Tensor, it should be a 1-D Tensor.
It represents ending indices of corresponding axis in ``axes``.
Returns:
Tensor, A ``Tensor``. The data type is same as ``input``.
Examples:
.. code-block:: python
import paddle
input = paddle.rand(shape=[4, 5, 6], dtype='float32')
# example 1:
# attr starts is a list which doesn't contain tensor.
axes = [0, 1, 2]
starts = [-3, 0, 2]
ends = [3, 2, 4]
sliced_1 = paddle.slice(input, axes=axes, starts=starts, ends=ends)
# sliced_1 is input[1:3, 0:2, 2:4].
# example 2:
# attr starts is a list which contain tensor.
minus_3 = paddle.full([1], -3, "int32")
sliced_2 = paddle.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
# sliced_2 is input[1:3, 0:2, 2:4].
"""
if in_dynamic_mode():
attrs = ()
starts_tensor = None
ends_tensor = None
if isinstance(axes, (list, tuple)):
axes = list(axes)
if len(axes) == 0:
raise ValueError(
"Input axes should not be an empty list/tuple."
)
for i in range(len(axes)):
if axes[i] < 0:
axes[i] = max(0, axes[i] + len(input.shape))
else:
axes[i] = min(len(input.shape) - 1, axes[i])
else:
raise ValueError(
"Input axes must be a python list or tuple, but reveived {}".format(
type(axes)
)
)
infer_flags = [1 for i in range(len(axes))]
if isinstance(starts, (list, tuple)):
starts = [
item.item(0) if isinstance(item, core.eager.Tensor) else item
for item in starts
]
elif isinstance(starts, core.eager.Tensor):
tensor_t = starts.numpy(False)
starts = list(tensor_t)
infer_flags = [-1 for i in range(len(axes))]
if isinstance(ends, (list, tuple)):
ends = [
item.item(0) if isinstance(item, core.eager.Tensor) else item
for item in ends
]
elif isinstance(ends, core.eager.Tensor):
tensor_t = ends.numpy(False)
ends = list(tensor_t)
infer_flags = [-1 for i in range(len(axes))]
return _C_ops.slice(input, axes, starts, ends, infer_flags, [])
else:
if not isinstance(starts, (list, tuple, Variable)):
raise ValueError(
"Input starts must be an Variable, python list or tuple."
)
if not isinstance(ends, (list, tuple, Variable)):
raise ValueError(
"Input ends must be an Variable, python list or tuple."
)
helper = LayerHelper('slice', **locals())
inputs = {'Input': input}
attrs = {'axes': axes}
infer_flags = [1 for i in range(len(axes))]
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
infer_flags = [-1 for i in range(len(axes))]
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if paddle.utils._contain_var(starts):
inputs[
'StartsTensorList'
] = paddle.utils._convert_to_tensor_list(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
infer_flags = [-1 for i in range(len(axes))]
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if paddle.utils._contain_var(ends):
inputs['EndsTensorList'] = paddle.utils._convert_to_tensor_list(
ends
)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# infer_flags
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input')
)
helper.append_op(
type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out}
)
return out
def transpose(x, perm, name=None):
"""
Permute the data dimensions of `input` according to `perm`.
The `i`-th dimension of the returned tensor will correspond to the
perm[i]-th dimension of `input`.
Args:
x (Tensor): The input Tensor. It is a N-D Tensor of data types bool, float32, float64, int32.
perm (list|tuple): Permute the input according to the data of perm.
name (str): The name of this layer. It is optional.
Returns:
Tensor, A transposed n-D Tensor, with data type being bool, float32, float64, int32, int64.
For Example:
.. code-block:: text
x = [[[ 1 2 3 4] [ 5 6 7 8] [ 9 10 11 12]]
[[13 14 15 16] [17 18 19 20] [21 22 23 24]]]
shape(x) = [2,3,4]
# Example 1
perm0 = [1,0,2]
y_perm0 = [[[ 1 2 3 4] [13 14 15 16]]
[[ 5 6 7 8] [17 18 19 20]]
[[ 9 10 11 12] [21 22 23 24]]]
shape(y_perm0) = [3,2,4]
# Example 2
perm1 = [2,1,0]
y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]]
[[ 2 14] [ 6 18] [10 22]]
[[ 3 15] [ 7 19] [11 23]]
[[ 4 16] [ 8 20] [12 24]]]
shape(y_perm1) = [4,3,2]
Examples:
.. code-block:: python
import paddle
x = paddle.randn([2, 3, 4])
x_transposed = paddle.transpose(x, perm=[1, 0, 2])
print(x_transposed.shape)
# [3L, 2L, 4L]
"""
if in_dynamic_mode():
return _C_ops.transpose(x, perm)
else:
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'float32',
'float64',
'int32',
'int64',
'uint16',
'complex64',
'complex128',
],
'transpose',
)
check_type(perm, 'perm', (list, tuple), 'transpose')
if isinstance(perm, tuple):
perm = list(perm)
if len(perm) != len(x.shape):
raise ValueError(
"Input(perm) is the permutation of dimensions of Input(x), "
"its length should be equal to dimensions of Input(x), "
"but received dimension of Input(x) is {}, "
"the length of Input(perm) is {}.".format(
len(x.shape), len(perm)
)
)
for idx, dim in enumerate(perm):
if dim >= len(x.shape):
raise ValueError(
"Each element in Input(perm) should be less than Input(x)'s dimension, "
"but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
"dimension %d." % (idx, perm[idx], len(x.shape))
)
helper = LayerHelper('transpose', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='transpose2',
inputs={'X': [x]},
outputs={'Out': [out], 'XShape': [x_shape]},
attrs={'axis': perm},
)
return out
def unstack(x, axis=0, num=None):
"""
This layer unstacks input Tensor :code:`x` into several Tensors along :code:`axis`.
If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x)`.
If :code:`num` is None, it would be inferred from :code:`x.shape[axis]`,
and if :code:`x.shape[axis]` <= 0 or is unknown, :code:`ValueError` is
raised.
Args:
x (Tensor): Input Tensor. It is a N-D Tensors of data types float32, float64, int32, int64.
axis (int): The axis along which the input is unstacked.
num (int|None): The number of output variables.
Returns:
list(Tensor), The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64.
Examples:
.. code-block:: python
import paddle
x = paddle.ones(name='x', shape=[2, 3, 5], dtype='float32') # create a tensor with shape=[2, 3, 5]
y = paddle.unstack(x, axis=1) # unstack with second axis, which results 3 tensors with shape=[2, 5]
"""
if not (-x.ndim <= axis < x.ndim):
raise ValueError(
'`axis` must be in the range [-{0}, {0})'.format(x.ndim)
)
if in_dynamic_mode():
if num is None:
num = x.shape[axis]
if num == 0:
return []
return _C_ops.unstack(x, axis, num)
else:
helper = LayerHelper('unstack', **locals())
if num is None:
if axis is None or x.shape[axis] <= 0:
raise ValueError('unknown unstack number')
else:
num = x.shape[axis]
outs = []
for _ in range(num):
outs.append(helper.create_variable_for_type_inference(x.dtype))
helper.append_op(
type='unstack',
inputs={'X': [x]},
outputs={'Y': outs},
attrs={'axis': axis, 'num': num},
)
return outs
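# Illustrative sketch (not part of the original module): for a tensor of shape
# [2, 3, 5], unstacking along axis=1 yields 3 tensors of shape [2, 5], matching
# the docstring example above. The accompanying patch additionally rejects an
# explicit `num` that is negative or larger than x.shape[axis] before the op is
# dispatched. The helper name below is hypothetical.
def _unstack_shape_demo():
    x = paddle.ones([2, 3, 5], dtype='float32')
    ys = unstack(x, axis=1)
    assert len(ys) == 3
    assert list(ys[0].shape) == [2, 5]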
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
"""
Reset the values of `input` according to the shard it belongs to.
Every value in `input` must be a non-negative integer, and
the parameter `index_num` represents the integer above the maximum
value of `input`. Thus, all values in `input` must be in the range
[0, index_num) and each value can be regarded as the offset to the beginning
of the range. The range is further split into multiple shards. Specifically,
we first compute the `shard_size` according to the following formula,
which represents the number of integers each shard can hold. So for the
i'th shard, it can hold values in the range [i*shard_size, (i+1)*shard_size).
::
shard_size = (index_num + nshards - 1) // nshards
For each value `v` in `input`, we reset it to a new value according to the
following formula:
::
v = v - shard_id * shard_size if shard_id * shard_size <= v < (shard_id+1) * shard_size else ignore_value
That is, the value `v` is set to the new offset within the range represented by the shard `shard_id`
if it is in the range. Otherwise, we reset it to `ignore_value`.
Args:
input (Tensor): Input tensor with data type int64 or int32. Its last dimension must be 1.
index_num (int): An integer representing the exclusive upper bound on every
nshards (int): The number of shards.
shard_id (int): The index of the current shard.
ignore_value (int, optional): An integer value out of sharded index range. The default value is -1.
Returns:
Tensor.
Examples:
.. code-block:: python
import paddle
label = paddle.to_tensor([[16], [1]], "int64")
shard_label = paddle.shard_index(input=label,
index_num=20,
nshards=2,
shard_id=0)
print(shard_label)
# [[-1], [1]]
"""
if in_dynamic_mode():
return _C_ops.shard_index(
input, index_num, nshards, shard_id, ignore_value
)
check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index')
op_type = 'shard_index'
helper = LayerHelper(op_type, **locals())
if shard_id < 0 or shard_id >= nshards:
raise ValueError(
'The shard_id(%d) should be in [0, %d)' % (shard_id, nshards)
)
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'X': [input]},
outputs={'Out': out},
attrs={
'index_num': index_num,
'nshards': nshards,
'shard_id': shard_id,
'ignore_value': ignore_value,
},
stop_gradient=True,
)
return out
def crop(x, shape=None, offsets=None, name=None):
"""
Crop input into output, as specified by offsets and shape.
.. code-block:: text
* Case 1 (input is a 2-D Tensor):
Input:
X.shape = [3, 5]
X.data = [[0, 1, 2, 0, 0],
[0, 3, 4, 0, 0],
[0, 0, 0, 0, 0]]
Parameters:
shape = [2, 2]
offsets = [0, 1]
Output:
Out.shape = [2, 2]
Out.data = [[1, 2],
[3, 4]]
* Case 2 (input is a 3-D Tensor):
Input:
X.shape = [2, 3, 4]
X.data = [[[0, 1, 2, 3],
[0, 5, 6, 7],
[0, 0, 0, 0]],
[[0, 3, 4, 5],
[0, 6, 7, 8],
[0, 0, 0, 0]]]
Parameters:
shape = [2, 2, -1]
offsets = [0, 0, 1]
Output:
Out.shape = [2, 2, 3]
Out.data = [[[1, 2, 3],
[5, 6, 7]],
[[3, 4, 5],
[6, 7, 8]]]
Parameters:
x (Tensor): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64.
shape (list|tuple|Tensor, optional): The output shape is specified
by `shape`. Its data type is int32. If a list/tuple, its length must be
the same as the dimension size of `x`. If a Tensor, it should be a 1-D Tensor.
When it is a list, each element can be an integer or a Tensor of shape: [1].
If Variable contained, it is suitable for the case that the shape may
be changed each iteration.
offsets (list|tuple|Variable, optional): Specifies the cropping
offsets at each dimension. Its data type is int32. If a list/tuple, its length
must be the same as the dimension size of `x`. If a Tensor, it should be a 1-D
Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1].
If Variable contained, it is suitable for the case that the offsets may be changed
each iteration. Default: None, the offsets are 0 at each dimension.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, The cropped Tensor has same data type with `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# x.shape = [3, 3]
# x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# shape can be a 1-D Tensor or list or tuple.
shape = paddle.to_tensor([2, 2], dtype='int32')
# shape = [2, 2]
# shape = (2, 2)
out = paddle.crop(x, shape)
# out.shape = [2, 2]
# out = [[1,2], [4,5]]
# offsets can be a 1-D Tensor or list or tuple.
offsets = paddle.to_tensor([0, 1], dtype='int32')
# offsets = [1, 0]
# offsets = (1, 1)
out = paddle.crop(x, shape, offsets)
# out.shape = [2, 2]
# if offsets = [0, 0], out = [[1,2], [4,5]]
# if offsets = [0, 1], out = [[2,3], [5,6]]
# if offsets = [1, 0], out = [[4,5], [7,8]]
# if offsets = [1, 1], out = [[5,6], [8,9]]
"""
helper = LayerHelper('crop_tensor', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'crop_tensor'
)
check_type(
shape, 'shape', (list, tuple, Variable, type(None)), 'crop_tensor'
)
check_type(
offsets, 'offsets', (list, tuple, Variable, type(None)), 'crop_tensor'
)
if offsets is None:
offsets = [0] * len(x.shape)
if shape is None:
shape = x.shape
if in_dynamic_mode():
return _C_ops.crop(x, shape, offsets)
out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x}
attrs = {}
def _attr_shape_check(shape_val):
if not isinstance(shape_val, int):
raise TypeError(
"Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
% type(shape_val)
)
if shape_val == 0:
raise ValueError(
"Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
% str(shape_val)
)
if shape_val < -1:
raise ValueError(
"When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
% str(shape_val)
)
def _attr_offsets_check(offset_val):
if not isinstance(offset_val, int):
raise TypeError(
"Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
% type(offset_val)
)
if offset_val < 0:
raise ValueError(
"Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
% str(offset_val)
)
if isinstance(offsets, Variable):
offsets.stop_gradient = True
ipts['Offsets'] = offsets
attrs['offsets'] = [-1] * len(x.shape)
elif paddle.utils._contain_var(offsets):
new_offsets_tensor = []
offsets_attr = []
for dim in offsets:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_offsets_tensor.append(dim)
offsets_attr.append(-1)
else:
_attr_offsets_check(dim)
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_offsets_tensor.append(temp_out)
offsets_attr.append(dim)
ipts['OffsetsTensor'] = new_offsets_tensor
attrs['offsets'] = offsets_attr
else:
for offset in offsets:
_attr_offsets_check(offset)
attrs['offsets'] = offsets
if isinstance(shape, Variable):
shape.stop_gradient = True
ipts['Shape'] = shape
elif paddle.utils._contain_var(shape):
new_shape_tensor = []
shape_attr = []
for dim_size in shape:
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
new_shape_tensor.append(dim_size)
shape_attr.append(0)
else:
_attr_shape_check(dim_size)
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out
)
new_shape_tensor.append(temp_out)
shape_attr.append(dim_size)
ipts['ShapeTensor'] = new_shape_tensor
attrs['shape'] = shape_attr
else:
for dim_size in shape:
_attr_shape_check(dim_size)
attrs['shape'] = shape
helper.append_op(
type='crop_tensor',
inputs=ipts,
outputs={'Out': out},
attrs=None if len(attrs) == 0 else attrs,
)
return out
@dygraph_only
def fill_(x, value):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
This function fills the Tensor with value inplace.
Args:
x (Tensor): ``x`` is the Tensor we want to fill with data inplace
value (Scalar): ``value`` is the value to be filled into x
Returns:
x(Tensor), Tensor x filled with value inplace
Examples:
.. code-block:: python
import paddle
tensor = paddle.to_tensor([0, 1, 2, 3, 4])
tensor.fill_(0)
print(tensor.tolist()) #[0, 0, 0, 0, 0]
"""
if not isinstance(value, (float, int)):
raise TypeError(
"The type of 'value' must be int or float, but received %s."
% (type(value))
)
return _C_ops.fill_(x, value)
@dygraph_only
def zero_(x):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
This function fills the Tensor with zeros inplace.
Args:
x (Tensor): ``x`` is the Tensor we want to fill with zeros inplace
Returns:
x (Tensor), Tensor x filled with zero inplace
Examples:
.. code-block:: python
import paddle
tensor = paddle.to_tensor([0, 1, 2, 3, 4])
tensor.zero_()
print(tensor.tolist()) #[0, 0, 0, 0, 0]
"""
return _C_ops.fill_(x, 0.0)
@dygraph_only
def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
"""
Note:
This API is ONLY available in Dygraph mode.
This function fills the value into the x Tensor's diagonal inplace.
Args:
x(Tensor): ``x`` is the original Tensor
value(Scalar): ``value`` is the value to fill in x
offset(int,optional): the offset to the main diagonal. Default: 0 (main diagonal).
wrap(bool,optional): whether the diagonal is 'wrapped' after N columns for tall matrices. Default: False.
name(str,optional): Name for the operation (optional, default is None)
Returns:
Tensor, Tensor with diagonal filled with value.
Examples:
.. code-block:: python
import paddle
x = paddle.ones((4, 3)) * 2
x.fill_diagonal_(1.0)
print(x.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
"""
if in_dynamic_mode():
if len(x.shape) == 2:
return _C_ops.fill_diagonal_(x, value, offset, wrap)
return _C_ops.fill_diagonal_(x, value, offset, True)
def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
inshape = x.shape
assert dim1 < len(inshape) and dim1 >= -len(
inshape
), 'dim1 should between [-rank,rank) in fill_diagonal_tensor_'
assert dim2 < len(inshape) and dim2 >= -len(
inshape
), 'dim2 should between [-rank,rank) in fill_diagonal_tensor_'
assert len(inshape) >= 2, 'Tensor dims should >= 2 in fill_diagonal_tensor_'
dim1 %= len(inshape)
dim2 %= len(inshape)
predshape = []
for i in range(len(inshape)):
if i != dim1 and i != dim2:
predshape.append(inshape[i])
diaglen = min(
min(inshape[dim1], inshape[dim1] + offset),
min(inshape[dim2], inshape[dim2] - offset),
)
predshape.append(diaglen)
assert tuple(predshape) == tuple(
y.shape
), f"the y shape should be {predshape}"
if len(y.shape) == 1:
y = y.reshape([1, -1])
if inplace:
return _C_ops.fill_diagonal_tensor_(x, y, offset, dim1, dim2)
return _C_ops.fill_diagonal_tensor(x, y, offset, dim1, dim2)
def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None):
"""
Note:
This API is ONLY available in Dygraph mode.
This function fills the source Tensor y into the x Tensor's diagonal inplace.
Args:
x (Tensor): ``x`` is the original Tensor
y (Tensor): ``y`` is the Tensor to fill in x
dim1 (int,optional): first dimension with respect to which to fill diagonal. Default: 0.
dim2 (int,optional): second dimension with respect to which to fill diagonal. Default: 1.
offset (int,optional): the offset to the main diagonal. Default: 0 (main diagonal).
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, Tensor with diagonal filled with y.
Examples:
.. code-block:: python
import paddle
x = paddle.ones((4, 3)) * 2
y = paddle.ones((3,))
x.fill_diagonal_tensor_(y)
print(x.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
"""
return _fill_diagonal_tensor_impl(
x, y, offset=offset, dim1=dim1, dim2=dim2, inplace=True
)
def fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None):
"""
This function fills the source Tensor y into the x Tensor's diagonal.
Args:
x (Tensor): ``x`` is the original Tensor
y (Tensor): ``y`` is the Tensor to fill in x
dim1 (int,optional): first dimension with respect to which to fill diagonal. Default: 0.
dim2 (int,optional): second dimension with respect to which to fill diagonal. Default: 1.
offset (int,optional): the offset to the main diagonal. Default: 0 (main diagonal).
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, Tensor with diagonal filled with y.
Examples:
.. code-block:: python
import paddle
x = paddle.ones((4, 3)) * 2
y = paddle.ones((3,))
nx = x.fill_diagonal_tensor(y)
print(nx.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
"""
return _fill_diagonal_tensor_impl(
x, y, offset=offset, dim1=dim1, dim2=dim2, inplace=False
)
@dygraph_only
def tolist(x):
"""
Note:
This API is ONLY available in Dygraph mode.
This function translates the paddle.Tensor to a python list.
Args:
x (Tensor): ``x`` is the Tensor we want to translate to a list.
Returns:
list, A list that contains the same values as the current Tensor.
Examples:
.. code-block:: python
import paddle
t = paddle.to_tensor([0,1,2,3,4])
expectlist = t.tolist()
print(expectlist) #[0, 1, 2, 3, 4]
expectlist = paddle.tolist(t)
print(expectlist) #[0, 1, 2, 3, 4]
"""
# TODO(zhouwei): will remove 0-D Tensor.numpy() hack
return x.numpy(False).tolist()
def concat(x, axis=0, name=None):
"""
Concatenates the input along the axis. It doesn't support 0-D Tensor because it requires a certain axis, and 0-D Tensor
doesn't have any axis.
Args:
x (list|tuple): ``x`` is a Tensor list or Tensor tuple which is with data type bool, float16,
float32, float64, int32, int64, int8, uint8. All the Tensors in ``x`` must have same data type.
axis (int|Tensor, optional): Specify the axis to operate on the input Tensors.
It should be an integer or a 0-D int Tensor with shape []. The effective range is [-R, R), where R is Rank(x). When ``axis < 0``,
it works the same way as ``axis+R``. Default is 0.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, A Tensor with the same data type as ``x``.
Examples:
.. code-block:: python
import paddle
x1 = paddle.to_tensor([[1, 2, 3],
[4, 5, 6]])
x2 = paddle.to_tensor([[11, 12, 13],
[14, 15, 16]])
x3 = paddle.to_tensor([[21, 22],
[23, 24]])
zero = paddle.full(shape=[1], dtype='int32', fill_value=0)
# When the axis is negative, the real axis is (axis + Rank(x))
# As follow, axis is -1, Rank(x) is 2, the real axis is 1
out1 = paddle.concat(x=[x1, x2, x3], axis=-1)
out2 = paddle.concat(x=[x1, x2], axis=0)
out3 = paddle.concat(x=[x1, x2], axis=zero)
# out1
# [[ 1 2 3 11 12 13 21 22]
# [ 4 5 6 14 15 16 23 24]]
# out2 out3
# [[ 1 2 3]
# [ 4 5 6]
# [11 12 13]
# [14 15 16]]
"""
input = x
if in_dynamic_mode():
if isinstance(axis, Variable):
axis = axis.item(0)
if not isinstance(input, Variable):
input = [t for t in input if t.shape.count(0) == 0]
return _C_ops.concat(input, axis)
else:
check_type(input, 'input', (list, tuple, Variable), 'concat')
if not isinstance(input, Variable):
for id, x in enumerate(input):
check_variable_and_dtype(
x,
'input[' + str(id) + ']',
[
'bool',
'float16',
'float32',
'float64',
'int32',
'int64',
'int8',
'uint8',
'uint16',
],
'concat',
)
if x.dtype != input[0].dtype:
raise TypeError(
"All the Tensors in the input must have the same data type."
)
else:
input = [input]
check_type(axis, 'axis', (int, Variable), 'concat')
if isinstance(axis, Variable):
check_dtype(
axis.dtype,
'axis',
['int32', 'int64'],
'concat',
"The data type of axis must be int32 or int64 when axis is a Tensor",
)
helper = LayerHelper('concat', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
# NOTE(liym27): Don't remove this if branch!
# This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
# is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static graph mode.
assert len(input) == 1, (
"If the elements of 'input' in concat are Variable(LoDTensorArray), "
"number of the elements must be 1, but received %s."
% len(input)
)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': input[0]},
outputs={'Out': [out], 'OutIndex': [out_index]},
attrs={'axis': axis, 'use_stack': False},
)
else:
inputs = {'X': input}
attrs = {}
if isinstance(axis, Variable):
axis.stop_gradient = True
inputs['AxisTensor'] = axis
else:
attrs['axis'] = axis
helper.append_op(
type='concat',
inputs=inputs,
outputs={'Out': [out]},
attrs=attrs,
)
return out
def broadcast_tensors(input, name=None):
"""
Broadcast a list of tensors following broadcast semantics
Note:
If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .
.. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
Args:
input (list|tuple): ``input`` is a Tensor list or Tensor tuple which is with data type bool,
float16, float32, float64, int32, int64. All the Tensors in ``input`` must have same data type.
Currently we only support tensors with rank no greater than 5.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
list(Tensor), The list of broadcasted tensors following the same order as ``input``.
Examples:
.. code-block:: python
import paddle
x1 = paddle.rand([1, 2, 3, 4]).astype('float32')
x2 = paddle.rand([1, 2, 1, 4]).astype('float32')
x3 = paddle.rand([1, 1, 3, 1]).astype('float32')
out1, out2, out3 = paddle.broadcast_tensors(input=[x1, x2, x3])
# out1, out2, out3: tensors broadcasted from x1, x2, x3 with shape [1,2,3,4]
"""
num_inputs = len(input)
if in_dynamic_mode():
return _C_ops.broadcast_tensors(input)
else:
check_type(input, 'input', (list, tuple), 'broadcast_tensors')
if num_inputs < 1:
raise TypeError(
"At least 1 tensor is needed to perform broadcast_tensors"
)
# Check input types
for id, x in enumerate(input):
check_variable_and_dtype(
x,
'input[' + str(id) + ']',
[
'bool',
'float16',
'float32',
'float64',
'int32',
'int64',
'uint16',
],
'broadcast_tensors',
)
if x.dtype != input[0].dtype:
raise TypeError(
"All the Tensors in the input must have the same data type."
)
# Check bcast semantics
output_shape_r_last_tensor_index = []
output_shape_r = []
# Use while loop due to weird behaviour of "range()"
j = 0
while j < len(input):
tensor = input[j]
shape = list(reversed(tensor.shape))
i = 0
while i < len(shape):
if len(output_shape_r) <= i:
output_shape_r.append(shape[i])
output_shape_r_last_tensor_index.append(j)
else:
invalid = (
output_shape_r[i] != shape[i]
and output_shape_r[i] != 1
and shape[i] != 1
)
if invalid:
last_index = output_shape_r_last_tensor_index[i]
raise TypeError(
"Input tensors to broadcast_tensors does not follow bcast semantics"
f"Tensor {last_index} conflicts with Tensor {j} in reversed dimension {i}"
)
if output_shape_r[i] <= shape[i]:
output_shape_r[i] = shape[i]
output_shape_r_last_tensor_index[i] = j
i += 1 # while i < len(shape)
j += 1 # while j < len(input)
helper = LayerHelper('broadcast_tensors', **locals())
i = 0
out = []
while i < num_inputs:
out.append(
helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
)
i += 1
inputs = {'X': input}
helper.append_op(
type='broadcast_tensors',
inputs=inputs,
outputs={'Out': out},
attrs={},
)
return out
def flip(x, axis, name=None):
"""
Reverse the order of an n-D tensor along the given axis (or axes) in ``axis``.
Args:
x (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor x
should be float32, float64, int32, int64, bool.
axis (list|tuple|int): The axis(axes) to flip on. Negative indices for indexing from the end are accepted.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, Tensor or LoDTensor calculated by flip layer. The data type is same with input x.
Examples:
.. code-block:: python
import paddle
image_shape=(3, 2, 2)
img = paddle.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
tmp = paddle.flip(img, [0,1])
print(tmp) # [[[10,11],[8, 9]], [[6, 7],[4, 5]], [[2, 3],[0, 1]]]
out = paddle.flip(tmp,-1)
print(out) # [[[11,10],[9, 8]], [[7, 6],[5, 4]], [[3, 2],[1, 0]]]
"""
if isinstance(axis, int):
axis = [axis]
if in_dynamic_mode():
return _C_ops.flip(x, axis)
else:
helper = LayerHelper("flip", **locals())
check_type(x, 'X', (Variable), 'flip')
dtype = helper.input_dtype('x')
check_dtype(
dtype,
'X',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'flip',
)
check_type(axis, 'axis', (list, tuple), 'flip')
if name is None:
out = helper.create_variable_for_type_inference(dtype)
else:
out = helper.create_variable(
name=name, dtype=dtype, persistable=False
)
helper.append_op(
type="flip",
inputs={"X": x},
outputs={"Out": out},
attrs={"axis": axis},
)
return out
def rot90(x, k=1, axes=[0, 1], name=None):
"""
Rotate an n-D tensor by 90 degrees. The rotation direction and times are specified by axes and the absolute value of k. Rotation direction is from axes[0] towards axes[1] if k > 0, and from axes[1] towards axes[0] for k < 0.
Args:
x (Tensor): The input Tensor(or LoDTensor). The data type of the input Tensor x
should be float16, float32, float64, int32, int64, bool. float16 is only supported on gpu.
k (int, optional): Direction and number of times to rotate, default value: 1.
axes (list|tuple, optional): Axes to rotate, dimension must be 2. default value: [0, 1].
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor, Tensor or LoDTensor calculated by rot90 layer. The data type is same with input x.
Examples:
.. code-block:: python
import paddle
data = paddle.arange(4)
data = paddle.reshape(data, (2, 2))
print(data)
#[[0, 1],
# [2, 3]]
y = paddle.rot90(data, 1, [0, 1])
print(y)
#[[1, 3],
# [0, 2]]
y= paddle.rot90(data, -1, [0, 1])
print(y)
#[[2, 0],
# [3, 1]]
data2 = paddle.arange(8)
data2 = paddle.reshape(data2, (2,2,2))
print(data2)
#[[[0, 1],
# [2, 3]],
# [[4, 5],
# [6, 7]]]
y = paddle.rot90(data2, 1, [1, 2])
print(y)
#[[[1, 3],
# [0, 2]],
# [[5, 7],
# [4, 6]]]
"""
helper = LayerHelper("rot90", **locals())
check_type(x, 'X', (Variable), 'rot90')
dtype = helper.input_dtype('x')
check_dtype(
dtype,
'X',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'rot90',
)
check_type(axes, 'axes', (list, tuple), 'rot90')
input_total_dims = len(x.shape)
total_rot_dims = len(axes)
if total_rot_dims != 2:
raise ValueError(
"expected total rotation axes == 2, but got axes = {}".format(
total_rot_dims
)
)
if input_total_dims < 2:
raise ValueError(
"expected total dims >= 2, but got total dims = {}".format(
input_total_dims
)
)
if not (axes[0] != axes[1] and abs(axes[0] - axes[1]) != input_total_dims):
raise ValueError(
"expected rotation axes to be different, but got axis0 = {}, and axis1 = {}".format(
axes[0], axes[1]
)
)
if not (axes[0] < input_total_dims and axes[0] >= -input_total_dims):
raise ValueError(f"Rotation axis0 out of range, axis0 = {axes[0]}")
if not (axes[1] < input_total_dims and axes[1] >= -input_total_dims):
raise ValueError(f"Rotation axis1 out of range, axis1 = {axes[1]}")
k %= 4
if k == 0:
return x
if k == 2:
return flip(flip(x, axes[0]), axes[1])
axes_list = list(range(0, input_total_dims))
(axes_list[axes[0]], axes_list[axes[1]]) = (
axes_list[axes[1]],
axes_list[axes[0]],
)
if k == 1:
return transpose(flip(x, axes[1]), axes_list)
else:
# k == 3
return flip(transpose(x, axes_list), axes[1])
def flatten(x, start_axis=0, stop_axis=-1, name=None):
r"""
Flattens a contiguous range of axes in a tensor according to start_axis and stop_axis.
Note:
The output Tensor will share data with origin Tensor and doesn't have a Tensor copy in ``dygraph`` mode.
If you want to use the Tensor copy version, please use `Tensor.clone` like ``flatten_clone_x = x.flatten().clone()``.
For Example:
.. code-block:: text
Case 1:
Given
X.shape = (3, 100, 100, 4)
and
start_axis = 1
stop_axis = 2
We get:
Out.shape = (3, 100 * 100, 4)
Case 2:
Given
X.shape = (3, 100, 100, 4)
and
start_axis = 0
stop_axis = -1
We get:
Out.shape = (3 * 100 * 100 * 4, )
Args:
x (Tensor): A tensor whose number of dimensions is >= axis. A tensor with data type float16, float32,
float64, int8, int32, int64, uint8.
start_axis (int): the start axis to flatten
stop_axis (int): the stop axis to flatten
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, A tensor with the contents of the input tensor, with input \
axes flattened by indicated start axis and end axis. \
A Tensor with data type same as input x.
Examples:
.. code-block:: python
import paddle
image_shape=(2, 3, 4, 4)
x = paddle.arange(end=image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3])
img = paddle.reshape(x, image_shape)
out = paddle.flatten(img, start_axis=1, stop_axis=2)
# out shape is [2, 12, 4]
# out shares data with img in dygraph mode
img[0, 0, 0, 0] = -1
print(out[0, 0, 0]) # [-1]
"""
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Tensor")
x_dim = len(x.shape)
if x_dim == 0:
if not (isinstance(start_axis, int)) or start_axis not in [0, -1]:
raise ValueError(
"The start_axis should be int, and should be 0 or -1 when the input tensor is a 0-D-Tensor"
)
if not (isinstance(stop_axis, int)) or stop_axis not in [0, -1]:
raise ValueError(
"The stop_axis should be int, and should be 0 or -1 when the input tensor is a 0-D-Tensor"
)
else:
if (
not (isinstance(start_axis, int))
or (start_axis > x_dim - 1)
or start_axis < -x_dim
):
raise ValueError(
"The start_axis should be a int, and in range [-rank(x), rank(x))"
)
if (
not (isinstance(stop_axis, int))
or (stop_axis > x_dim - 1)
or stop_axis < -x_dim
):
raise ValueError(
"The stop_axis should be a int, and in range [-rank(x), rank(x))"
)
if start_axis < 0:
start_axis = start_axis + x_dim
if stop_axis < 0:
stop_axis = stop_axis + x_dim
if start_axis > stop_axis:
raise ValueError("The stop_axis should be larger than stat_axis")
if in_dynamic_mode():
return _C_ops.flatten(x, start_axis, stop_axis)
else:
check_variable_and_dtype(
x,
'x',
[
'float16',
'float32',
'float64',
'int8',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'flatten',
)
helper = LayerHelper('flatten', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='flatten_contiguous_range',
inputs={"X": x},
outputs={'Out': out, 'XShape': x_shape},
attrs={"start_axis": start_axis, "stop_axis": stop_axis},
)
return out
@inplace_apis_in_dygraph_only
def flatten_(x, start_axis=0, stop_axis=-1, name=None):
"""
Inplace version of ``flatten`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_flatten`.
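A minimal usage sketch (dygraph mode only; the output comment is illustrative and
assumes the in-place API is exported as ``paddle.flatten_``, like the other in-place APIs):
Examples:
.. code-block:: python
import paddle
x = paddle.rand([2, 3, 4])
paddle.flatten_(x, start_axis=1, stop_axis=2)
print(x.shape) # [2, 12]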
"""
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Tensor")
x_dim = len(x.shape)
if (
not (isinstance(start_axis, int))
or (start_axis > x_dim - 1)
or start_axis < -x_dim
):
raise ValueError(
"The start_axis should be a int, and in range [-rank(x), rank(x))"
)
if (
not (isinstance(stop_axis, int))
or (stop_axis > x_dim - 1)
or stop_axis < -x_dim
):
raise ValueError(
"The stop_axis should be a int, and in range [-rank(x), rank(x))"
)
if start_axis < 0:
start_axis = start_axis + x_dim
if stop_axis < 0:
stop_axis = stop_axis + x_dim
if start_axis > stop_axis:
raise ValueError("The stop_axis should be larger than stat_axis")
if in_dynamic_mode():
return _C_ops.flatten_(x, start_axis, stop_axis)
def roll(x, shifts, axis=None, name=None):
"""
Roll the `x` tensor along the given axis(axes). Elements that roll beyond the last position
are re-introduced at the first position according to 'shifts'. If an axis is not specified,
the tensor will be flattened before rolling and then restored to the original shape.
Args:
x (Tensor): The x tensor as input.
shifts (int|list|tuple): The number of places by which the elements
of the `x` tensor are shifted.
axis (int|list|tuple, optional): axis(axes) along which to roll. Default: None
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor, A Tensor with same data type as `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
out_z1 = paddle.roll(x, shifts=1)
print(out_z1)
#[[9. 1. 2.]
# [3. 4. 5.]
# [6. 7. 8.]]
out_z2 = paddle.roll(x, shifts=1, axis=0)
print(out_z2)
#[[7. 8. 9.]
# [1. 2. 3.]
# [4. 5. 6.]]
out_z3 = paddle.roll(x, shifts=1, axis=1)
print(out_z3)
#[[3. 1. 2.]
# [6. 4. 5.]
# [9. 7. 8.]]
"""
origin_shape = x.shape
if type(shifts) == int:
shifts = [shifts]
if type(axis) == int:
axis = [axis]
len_origin_shape = len(origin_shape)
if axis is not None:
for i in range(len(axis)):
if axis[i] >= len_origin_shape or axis[i] < -len_origin_shape:
raise ValueError(
"axis is out of range, it should be in range [{}, {}), but received {}".format(
-len_origin_shape, len_origin_shape, axis
)
)
else:
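# No axis given: pass an empty axis list so the kernel flattens the tensor,
# rolls it, and then restores the original shape (see the docstring above).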
axis = []
if in_dynamic_mode():
return _C_ops.roll(x, shifts, axis)
else:
check_variable_and_dtype(
x,
'dtype',
[
'float16',
'float32',
'uint16',
'float64',
'int32',
'int64',
'complex64',
'complex128',
],
'roll',
)
helper = LayerHelper("roll", **locals())
check_type(axis, 'axis', (list, tuple), 'roll')
out = helper.create_variable_for_type_inference(x.dtype)
if isinstance(shifts, Variable):
helper.append_op(
type='roll',
inputs={'X': x, "ShiftsTensor": shifts},
outputs={'Out': out},
attrs={'axis': axis},
)
else:
check_type(shifts, 'shifts', (list, tuple), 'roll')
helper.append_op(
type='roll',
inputs={'X': x},
outputs={'Out': out},
attrs={'axis': axis, 'shifts': shifts},
)
return out
def stack(x, axis=0, name=None):
"""
Stacks all the input tensors ``x`` along ``axis`` dimension.
All tensors must be of the same shape and same dtype.
For example, given N tensors of shape [A, B], if ``axis == 0``, the shape of stacked
tensor is [N, A, B]; if ``axis == 1``, the shape of stacked
tensor is [A, N, B], etc.
.. code-block:: text
Case 1:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 0
Output:
Out.dims = [3, 1, 2]
Out.data =[ [ [1.0, 2.0] ],
[ [3.0, 4.0] ],
[ [5.0, 6.0] ] ]
Case 2:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 1 or axis = -2 # If axis = -2, axis = axis+ndim(x[0])+1 = -2+2+1 = 1.
Output:
Out.shape = [1, 3, 2]
Out.data =[ [ [1.0, 2.0]
[3.0, 4.0]
[5.0, 6.0] ] ]
Args:
x (list[Tensor]|tuple[Tensor]): Input ``x`` can be a ``list`` or ``tuple`` of tensors, the Tensors in ``x``
must be of the same shape and dtype. Supported data types: float32, float64, int32, int64.
axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``,
where ``R`` is the number of dimensions of the first input tensor ``x[0]``.
If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, The stacked tensor with same data type as input.
Example:
.. code-block:: python
import paddle
x1 = paddle.to_tensor([[1.0, 2.0]])
x2 = paddle.to_tensor([[3.0, 4.0]])
x3 = paddle.to_tensor([[5.0, 6.0]])
out = paddle.stack([x1, x2, x3], axis=0)
print(out.shape) # [3, 1, 2]
print(out)
# [[[1., 2.]],
# [[3., 4.]],
# [[5., 6.]]]
out = paddle.stack([x1, x2, x3], axis=-2)
print(out.shape) # [1, 3, 2]
print(out)
# [[[1., 2.],
# [3., 4.],
# [5., 6.]]]
"""
axis = 0 if axis is None else axis
if in_dynamic_mode():
return _C_ops.stack(x, axis)
else:
if not isinstance(x, list) and not isinstance(x, tuple):
# NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
# In that case, Variable is array of tensors indeed.
if (
isinstance(x, Variable)
and x.desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY
):
x = [x]
else:
raise TypeError(
"The type of '{}' in {} must be {}, but received {}".format(
'x',
'stack',
'list[Tensor], tuple[Tensor] or TensorArray',
type(x),
)
)
helper = LayerHelper('stack', **locals())
out = helper.create_variable_for_type_inference(x[0].dtype)
if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
assert len(x) == 1, (
"If the elements of 'x' in stack are Variable(LoDTensorArray), "
"number of the elements must be 1, but received %s." % len(x)
)
out_index = helper.create_variable_for_type_inference(dtype="int32")
for i in x:
check_variable_and_dtype(
i,
'x',
[
'float16',
'float32',
'float64',
'int32',
'int64',
'uint16',
],
'stack',
)
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': x[0]},
outputs={'Out': [out], 'OutIndex': [out_index]},
attrs={'axis': axis, 'use_stack': True},
)
else:
helper.append_op(
type='stack',
inputs={'X': x},
outputs={'Y': out},
attrs={'axis': axis},
)
return out
def split(x, num_or_sections, axis=0, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
x (Tensor): A N-D Tensor. The data type is bool, bfloat16, float16, float32, float64, uint8, int8, int32 or int64.
num_or_sections (int|list|tuple): If ``num_or_sections`` is an int, then ``num_or_sections``
indicates the number of equal sized sub-Tensors that the ``x`` will be divided into.
If ``num_or_sections`` is a list or tuple, the length of it indicates the number of
sub-Tensors and the elements in it indicate the sizes of sub-Tensors' dimension orderly.
The length of the list must not be larger than the ``x`` 's size of specified ``axis``.
axis (int|Tensor, optional): The axis along which to split, it can be a integer or a ``0-D Tensor``
with shape [] and data type ``int32`` or ``int64``.
If :math:`axis < 0`, the axis to split along is :math:`rank(x) + axis`. Default is 0.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Tensor), The list of segmented Tensors.
Example:
.. code-block:: python
import paddle
# x is a Tensor of shape [3, 9, 5]
x = paddle.rand([3, 9, 5])
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
print(out0.shape) # [3, 3, 5]
print(out1.shape) # [3, 3, 5]
print(out2.shape) # [3, 3, 5]
out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1)
print(out0.shape) # [3, 2, 5]
print(out1.shape) # [3, 3, 5]
print(out2.shape) # [3, 4, 5]
out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)
print(out0.shape) # [3, 2, 5]
print(out1.shape) # [3, 3, 5]
print(out2.shape) # [3, 4, 5]
# axis is negative, the real axis is (rank(x) + axis)=1
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2)
print(out0.shape) # [3, 3, 5]
print(out1.shape) # [3, 3, 5]
print(out2.shape) # [3, 3, 5]
"""
input = x
dim = axis
if in_dynamic_mode():
if isinstance(dim, Variable):
dim = dim.item(0)
assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
dim = (len(input.shape) + dim) if dim < 0 else dim
if isinstance(num_or_sections, (list, tuple)):
if paddle.utils._contain_var(num_or_sections):
for index, item in enumerate(num_or_sections):
if isinstance(item, Variable):
num_or_sections[index] = num_or_sections[index].item()
elif not isinstance(num_or_sections, int):
raise TypeError(
"The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
"received %s." % (type(num_or_sections))
)
if isinstance(num_or_sections, int):
return _C_ops.split_with_num(input, num_or_sections, dim)
else:
return _C_ops.split(input, num_or_sections, dim)
else:
check_variable_and_dtype(
input,
'input',
[
'bool',
'bfloat16',
'float16',
'uint16',
'float32',
'float64',
'int32',
'int64',
'uint8',
'int8',
],
'split',
)
check_type(
num_or_sections, 'num_or_sections', (list, int, tuple), 'split'
)
check_type(dim, 'dim', (int, Variable), 'split')
if isinstance(dim, Variable):
check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')
helper = LayerHelper('split', **locals())
input_shape = input.shape
inputs = {'X': input}
attrs = {
'num': num_or_sections if isinstance(num_or_sections, int) else 0
}
def _get_SectionsTensorList(one_list):
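# Normalise the per-section sizes into a list of 1-element int32 Tensors:
# Tensor entries are used as-is (with stop_gradient=True), plain ints are
# materialised with fill_constant, and at most one entry may be -1.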
tensor_list = []
unk_dim_idx = -1
for idx, dim_size in enumerate(one_list):
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
tensor_list.append(dim_size)
else:
assert isinstance(dim_size, int)
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one value of 'num_or_section' in split can "
"be -1. But received num_or_section[%d] is also -1."
% idx
)
unk_dim_idx = idx
temp_out = helper.create_variable_for_type_inference(
'int32'
)
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out
)
tensor_list.append(temp_out)
return tensor_list
if isinstance(dim, Variable):
dim.stop_gradient = True
inputs['AxisTensor'] = dim
else:
assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
dim = (len(input_shape) + dim) if dim < 0 else dim
attrs['axis'] = dim
if isinstance(num_or_sections, int):
assert num_or_sections > 1, 'num_or_sections must be more than 1.'
if isinstance(dim, int) and input_shape[dim] > 0:
assert input_shape[dim] % num_or_sections == 0, (
"The input's size along the split dimension "
"must be evenly divisible by Attr(num_or_sections). "
"But %d is not evenly divisible by %d. "
% (input_shape[dim], num_or_sections)
)
num = num_or_sections
else:
if isinstance(dim, int) and input_shape[dim] > 0:
assert (
len(num_or_sections) <= input_shape[dim]
), 'len(num_or_sections) must not be more than input.shape[dim].'
num = len(num_or_sections)
attrs['sections'] = [
-1 if isinstance(ele, Variable) else ele
for ele in num_or_sections
]
if paddle.utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections
)
outs = [
helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
for i in range(num)
]
helper.append_op(
type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs
)
return outs
def vsplit(x, num_or_sections, name=None):
"""
Split the input tensor into multiple sub-Tensors along the vertical axis, which is equivalent to ``paddle.split`` with ``axis=0``.
Args:
x (Tensor): A Tensor whose dimension must be greater than 1. The data type is bool, float16, float32, float64, uint8, int8, int32 or int64.
num_or_sections (int|list|tuple): If ``num_or_sections`` is an int, then ``num_or_sections``
indicates the number of equal sized sub-Tensors that the ``x`` will be divided into.
If ``num_or_sections`` is a list or tuple, the length of it indicates the number of
sub-Tensors and the elements in it indicate the sizes of sub-Tensors' dimension orderly.
The length of the list must not be larger than the ``x`` 's size of axis 0.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
list[Tensor], The list of segmented Tensors.
Example:
.. code-block:: python
import paddle
# x is a Tensor of shape [8, 6, 7]
x = paddle.rand([8, 6, 7])
out0, out1 = paddle.vsplit(x, num_or_sections=2)
print(out0.shape) # [4, 6, 7]
print(out1.shape) # [4, 6, 7]
out0, out1, out2 = paddle.vsplit(x, num_or_sections=[1, 3, 4])
print(out0.shape) # [1, 6, 7]
print(out1.shape) # [3, 6, 7]
print(out2.shape) # [4, 6, 7]
out0, out1, out2 = paddle.vsplit(x, num_or_sections=[2, 3, -1])
print(out0.shape) # [2, 6, 7]
print(out1.shape) # [3, 6, 7]
print(out2.shape) # [3, 6, 7]
"""
if x.ndim < 2:
raise ValueError(
"The input tensor's dimension must be greater than 1, but got {}".format(
x.ndim
)
)
return split(x, num_or_sections, axis=0, name=name)
def squeeze(x, axis=None, name=None):
"""
Squeeze the dimension(s) of size 1 of input tensor x's shape.
Note that the output Tensor will share data with origin Tensor and doesn't have a
Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version,
please use `Tensor.clone` like ``squeeze_clone_x = x.squeeze().clone()``.
If axis is provided, it will remove the dimension(s) by given axis that of size 1.
If the dimension of a given axis is not of size 1, that dimension remains unchanged.
If axis is not provided, all dimensions of size 1 will be removed.
.. code-block:: text
Case1:
Input:
x.shape = [1, 3, 1, 5] # If axis is not provided, all dimensions of size 1 will be removed.
axis = None
Output:
out.shape = [3, 5]
Case2:
Input:
x.shape = [1, 3, 1, 5] # If axis is provided, it will remove the dimension(s) by given axis that of size 1.
axis = 0
Output:
out.shape = [3, 1, 5]
Case3:
Input:
x.shape = [1, 3, 1, 5] # If the dimension of one given axis (3) is not of size 1, the dimension remain unchanged.
axis = [0, 2, 3]
Output:
out.shape = [3, 5]
Case4:
Input:
x.shape = [1, 3, 1, 5] # If axis is negative, axis = axis + ndim (number of dimensions in x).
axis = [-2]
Output:
out.shape = [1, 3, 5]
Args:
x (Tensor): The input Tensor. Supported data type: float32, float64, bool, int8, int32, int64.
axis (int|list|tuple, optional): An integer or list/tuple of integers, indicating the dimensions to be squeezed. Default is None.
The range of axis is :math:`[-ndim(x), ndim(x))`.
If axis is negative, :math:`axis = axis + ndim(x)`.
If axis is None, all the dimensions of x of size 1 will be removed.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Tensor, Squeezed Tensor with the same data type as input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.rand([5, 1, 10])
output = paddle.squeeze(x, axis=1)
print(x.shape) # [5, 1, 10]
print(output.shape) # [5, 10]
# output shares data with x in dygraph mode
x[0, 0, 0] = 10.
print(output[0, 0]) # [10.]
"""
if axis is None:
axis = []
elif isinstance(axis, int):
axis = [axis]
elif isinstance(axis, tuple):
axis = list(axis)
input = x
axes = axis
if in_dynamic_mode():
return _C_ops.squeeze(input, axes)
else:
helper = LayerHelper("squeeze", **locals())
check_variable_and_dtype(
input,
'input',
[
'float16',
'uint16',
'float32',
'float64',
'bool',
'int8',
'int32',
'int64',
'complex64',
'complex128',
],
'squeeze',
)
check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'squeeze')
attrs = {}
if isinstance(axes, Variable):
axes.stop_gradient = True
attrs["axes"] = axes
elif isinstance(axes, (list, tuple)):
if paddle.utils._contain_var(axes):
attrs["axes"] = paddle.utils._convert_to_tensor_list(axes)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="squeeze2",
inputs={"X": input},
attrs=attrs,
outputs={"Out": out, "XShape": x_shape},
)
return out
@inplace_apis_in_dygraph_only
def squeeze_(x, axis=None, name=None):
"""
Inplace version of ``squeeze`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_squeeze`.
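A minimal usage sketch (dygraph mode only; the output comment is illustrative and
assumes the in-place API is exported as ``paddle.squeeze_``, like the other in-place APIs):
Examples:
.. code-block:: python
import paddle
x = paddle.rand([5, 1, 10])
paddle.squeeze_(x, axis=1)
print(x.shape) # [5, 10]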
"""
if axis is None:
axis = []
elif isinstance(axis, int):
axis = [axis]
elif isinstance(axis, tuple):
axis = list(axis)
input = x
axes = axis
if in_dynamic_mode():
return _C_ops.squeeze_(input, axes)
def unique_consecutive(
x,
return_inverse=False,
return_counts=False,
axis=None,
dtype="int64",
name=None,
):
"""
Eliminates all but the first element from every consecutive group of equivalent elements.
Note:
This function is different from :ref:`api_paddle_unique` in the sense that this function
only eliminates consecutive duplicate values. This semantics is similar to ``std::unique`` in C++.
Args:
x(Tensor): the input tensor, it's data type should be float32, float64, int32, int64.
return_inverse(bool, optional): If True, also return the indices for where elements in
the original input ended up in the returned unique consecutive tensor. Default is False.
return_counts(bool, optional): If True, also return the counts for each unique consecutive element.
Default is False.
axis(int, optional): The axis to apply unique consecutive. If None, the input will be flattened.
Default is None.
dtype(np.dtype|str, optional): The data type of the `inverse` tensor: int32 or int64.
Default: int64.
name(str, optional): Name for the operation. For more information, please refer to
:ref:`api_guide_Name`. Default is None.
Returns:
- out (Tensor), the unique consecutive tensor for x.
- inverse (Tensor), the indices mapping each element of the input tensor to
its position in the unique consecutive tensor for x.
inverse is provided only if return_inverse is True.
- counts (Tensor), the counts of every unique consecutive element in the input tensor.
counts is provided only if return_counts is True.
Example:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])
output = paddle.unique_consecutive(x) #
print(output)
# Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 2, 3, 1, 2])
_, inverse, counts = paddle.unique_consecutive(x, return_inverse=True, return_counts=True)
print(inverse)
# Tensor(shape=[8], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [0, 0, 1, 1, 2, 3, 3, 4])
print(counts)
# Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [2, 2, 1, 2, 1])
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3], [2, 1, 3]])
output = paddle.unique_consecutive(x, axis=0) #
print(output)
# Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [[2, 1, 3],
# [3, 0, 1],
# [2, 1, 3]])
"""
if axis is None:
axis = []
else:
axis = [axis]
attr_dtype = convert_np_dtype_to_dtype_(dtype)
if in_dynamic_mode():
out, inverse, counts = _C_ops.unique_consecutive(
x, return_inverse, return_counts, axis, attr_dtype
)
outs = [out]
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
if len(outs) == 1:
return outs[0]
return tuple(outs)
else:
check_variable_and_dtype(
x,
"input",
['float32', 'float64', 'int32', 'int64'],
'unique_consecutive',
)
check_type(return_inverse, 'return_inverse', bool, 'unique_consecutive')
check_type(return_counts, 'return_counts', bool, 'unique_consecutive')
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique_consecutive')
if len(axis) != 0:
check_type(axis[0], 'axis', int, 'unique_consecutive')
helper = LayerHelper('unique_consecutive', **locals())
attrs = {
'dtype': attr_dtype,
"return_inverse": return_inverse,
"return_counts": return_counts,
"axis": axis,
}
out = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True
)
inverse = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True
)
counts = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True
)
outputs = {"Out": out, "Index": inverse, "Counts": counts}
outs = [out]
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
helper.append_op(
type="unique_consecutive",
inputs={"X": x},
attrs=attrs,
outputs=outputs,
)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def unique(
x,
return_index=False,
return_inverse=False,
return_counts=False,
axis=None,
dtype="int64",
name=None,
):
r"""
Returns the unique elements of `x` in ascending order.
Args:
x(Tensor): The input tensor, it's data type should be float32, float64, int32, int64.
return_index(bool, optional): If True, also return the indices of the input tensor that
result in the unique Tensor.
return_inverse(bool, optional): If True, also return the indices for where elements in
the original input ended up in the returned unique tensor.
return_counts(bool, optional): If True, also return the counts for each unique element.
axis(int, optional): The axis to apply unique. If None, the input will be flattened.
Default: None.
dtype(np.dtype|str, optional): The data type of `indices` or `inverse` tensor: int32 or int64.
Default: int64.
name(str, optional): Name for the operation. For more information, please refer to
:ref:`api_guide_Name`. Default: None.
Returns:
tuple (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is \
provided only if `return_index` is True. `inverse` is provided only if `return_inverse` \
is True. `counts` is provided only if `return_counts` is True.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
unique = paddle.unique(x)
print(unique)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 2, 3, 5])
_, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
print(indices)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [3, 0, 1, 4])
print(inverse)
# Tensor(shape=[6], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 2, 2, 0, 3, 2])
print(counts)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [1, 1, 3, 1])
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
unique = paddle.unique(x)
print(unique)
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [0, 1, 2, 3])
unique = paddle.unique(x, axis=0)
print(unique)
# Tensor(shape=[2, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
# [[2, 1, 3],
# [3, 0, 1]])
"""
if axis is None:
axis = []
else:
axis = [axis]
attr_dtype = convert_np_dtype_to_dtype_(dtype)
if in_dynamic_mode():
out, indices, inverse, counts = _C_ops.unique(
x, return_index, return_inverse, return_counts, axis, attr_dtype
)
outs = [out]
if return_index:
outs.append(indices)
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
if len(outs) == 1:
return outs[0]
return tuple(outs)
else:
check_variable_and_dtype(
x,
"input",
['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
'unique',
)
check_type(return_index, 'return_index', bool, 'unique')
check_type(return_inverse, 'return_inverse', bool, 'unique')
check_type(return_counts, 'return_counts', bool, 'unique')
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique')
if len(axis) != 0:
check_type(axis[0], 'axis', int, 'unique')
helper = LayerHelper('unique', **locals())
attrs = {
'dtype': attr_dtype,
"return_index": return_index,
"return_inverse": return_inverse,
"return_counts": return_counts,
"axis": axis,
"is_sorted": True,
}
out = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True
)
indices = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True
)
inverse = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True
)
counts = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True
)
outputs = {
"Out": out,
"Indices": indices,
"Index": inverse,
"Counts": counts,
}
outs = [out]
if return_index:
outs.append(indices)
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
helper.append_op(
type="unique", inputs={"X": x}, attrs=attrs, outputs=outputs
)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def unsqueeze(x, axis, name=None):
"""
Insert single-dimensional entries to the shape of input Tensor ``x``. Takes one
required argument axis, a dimension or list of dimensions that will be inserted.
Dimension indices in axis are as seen in the output tensor.
Note that the output Tensor will share data with origin Tensor and doesn't have a
Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version,
please use `Tensor.clone` like ``unsqueeze_clone_x = x.unsqueeze(-1).clone()``.
Args:
x (Tensor): The input Tensor to be unsqueezed. Supported data type: bfloat16, float16, float32, float64, bool, int8, int32, int64.
axis (int|list|tuple|Tensor): Indicates the dimensions to be inserted. The data type is ``int32`` .
If ``axis`` is a list or tuple, each element of it should be integer or 0-D Tensor with shape [].
If ``axis`` is a Tensor, it should be an 1-D Tensor .
If ``axis`` is negative, ``axis = axis + ndim(x) + 1``.
name (str|None): Name for this layer. Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Tensor, Unsqueezed Tensor with the same data type as input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.rand([5, 10])
print(x.shape) # [5, 10]
out1 = paddle.unsqueeze(x, axis=0)
print(out1.shape) # [1, 5, 10]
out2 = paddle.unsqueeze(x, axis=[0, 2])
print(out2.shape) # [1, 5, 1, 10]
axis = paddle.to_tensor([0, 1, 2])
out3 = paddle.unsqueeze(x, axis=axis)
print(out3.shape) # [1, 1, 1, 5, 10]
# out1, out2, out3 share data with x in dygraph mode
x[0, 0] = 10.
print(out1[0, 0, 0]) # [10.]
print(out2[0, 0, 0, 0]) # [10.]
print(out3[0, 0, 0, 0, 0]) # [10.]
"""
input = x
axes = axis
if in_dynamic_mode():
if isinstance(axes, int):
axes = [axes]
elif isinstance(axes, Variable):
axes = axes.tolist()
elif isinstance(axes, (list, tuple)):
axes = [
item.item(0) if isinstance(item, Variable) else item
for item in axes
]
return _C_ops.unsqueeze(input, axes)
else:
check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
check_variable_and_dtype(
input,
'input',
[
'uint16',
'float16',
'float32',
'float64',
'bool',
'int8',
'int16',
'int32',
'int64',
'complex64',
'complex128',
],
'unsqueeze',
)
helper = LayerHelper("unsqueeze2", **locals())
inputs = {"X": input}
attrs = {}
if isinstance(axes, int):
axes = [axes]
if isinstance(axes, Variable):
axes.stop_gradient = True
inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)):
if paddle.utils._contain_var(axes):
inputs["AxesTensorList"] = paddle.utils._convert_to_tensor_list(
axes
)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="unsqueeze2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out, "XShape": x_shape},
)
return out
@inplace_apis_in_dygraph_only
def unsqueeze_(x, axis, name=None):
"""
Inplace version of ``unsqueeze`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_unsqueeze`.
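A minimal usage sketch (dygraph mode only; the output comment is illustrative and
assumes the in-place API is exported as ``paddle.unsqueeze_``, like the other in-place APIs):
Examples:
.. code-block:: python
import paddle
x = paddle.rand([5, 10])
paddle.unsqueeze_(x, axis=0)
print(x.shape) # [1, 5, 10]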
"""
input = x
axes = axis
if isinstance(axes, int):
axes = [axes]
elif isinstance(axes, Variable):
axes = axes.tolist()
elif isinstance(axes, (list, tuple)):
axes = [
item.item(0) if isinstance(item, Variable) else item
for item in axes
]
return _C_ops.unsqueeze_(input, axes)
def gather(x, index, axis=None, name=None):
"""
Output is obtained by gathering entries of ``axis``
of ``x`` indexed by ``index`` and concatenate them together.
.. code-block:: text
Given:
x = [[1, 2],
[3, 4],
[5, 6]]
index = [1, 2]
axis=[0]
Then:
out = [[3, 4],
[5, 6]]
Args:
x (Tensor): The source input tensor with rank>=1. Supported data type is
int32, int64, float32, float64 and uint8 (only for CPU),
float16 (only for GPU).
index (Tensor): The index input tensor with rank=0 or rank=1. Data type is int32 or int64.
axis (Tensor|int, optional): The axis of input to be gathered. It can be an int or a Tensor with data type int32 or int64. The default value is None; if None, ``axis`` is 0.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
output (Tensor), If the index is a 1-D tensor, the output has the same rank as ``x``, with the size of dimension ``axis`` equal to the length of ``index``. If the index is a 0-D tensor, the output removes the dimension that ``axis`` points to.
Examples:
.. code-block:: python
import paddle
input = paddle.to_tensor([[1,2],[3,4],[5,6]])
index = paddle.to_tensor([0,1])
output = paddle.gather(input, index, axis=0)
# expected output: [[1,2],[3,4]]
"""
if axis is None:
axis = 0
if in_dynamic_mode():
return _C_ops.gather(x, index, axis)
else:
check_variable_and_dtype(
x,
'x',
[
'float16',
'float32',
'float64',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'gather',
)
check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
if isinstance(axis, Variable):
check_variable_and_dtype(axis, 'axis', ['int32', 'int64'], 'gather')
helper = LayerHelper('gather', **locals())
dtype = helper.input_dtype('x')
out = helper.create_variable_for_type_inference(dtype)
if not isinstance(axis, Variable):
helper.append_op(
type="gather",
inputs={"X": x, "Index": index},
attrs={'axis': axis, 'overwrite': False},
outputs={"Out": out},
)
else:
helper.append_op(
type="gather",
inputs={"X": x, "Index": index, "Axis": axis},
attrs={"overwrite": False},
outputs={"Out": out},
)
return out
def unbind(input, axis=0):
"""
Removes a tensor dimension, then splits the input tensor into multiple sub-Tensors.
Args:
input (Tensor): The input variable which is an N-D Tensor, data type being bool, float16, float32, float64, int32 or int64.
axis (int32|int64, optional): A scalar with type ``int32|int64`` shape [1]. The dimension along which to unbind.
If :math:`axis < 0`, the dimension to unbind along is :math:`rank(input) + axis`. Default is 0.
Returns:
list(Tensor), The list of segmented Tensor variables.
Example:
.. code-block:: python
import paddle
# input is a Tensor which shape is [3, 4, 5]
input = paddle.rand([3, 4, 5])
[x0, x1, x2] = paddle.unbind(input, axis=0)
# x0.shape [4, 5]
# x1.shape [4, 5]
# x2.shape [4, 5]
[x0, x1, x2, x3] = paddle.unbind(input, axis=1)
# x0.shape [3, 5]
# x1.shape [3, 5]
# x2.shape [3, 5]
# x3.shape [3, 5]
"""
if not isinstance(axis, (int)):
raise TypeError(
"The type of 'axis' must be int, but received %s." % (type(axis))
)
if axis not in range(-input.ndim, input.ndim):
raise ValueError(
f'The axis must in range({-input.ndim}, {input.ndim}).'
)
if in_dynamic_mode():
return _C_ops.unbind(input, axis)
else:
if isinstance(axis, np.generic):
axis = axis.item()  # np.asscalar was removed in NumPy 1.23
input_shape = input.shape
axis_ = axis if axis >= 0 else len(input_shape) + axis
num = input_shape[axis_]
helper = LayerHelper("unbind", **locals())
check_type(input, 'input', (Variable), 'unbind')
dtype = helper.input_dtype()
check_dtype(
dtype,
'unbind',
[
'bool',
'float16',
'uint16',
'float32',
'float64',
'int32',
'int64',
],
'unbind',
)
outs = [
helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
for i in range(num)
]
helper.append_op(
type="unbind",
inputs={"X": input},
outputs={"Out": outs},
attrs={"axis": axis},
)
return outs
def scatter(x, index, updates, overwrite=True, name=None):
"""
**Scatter Layer**
Output is obtained by updating the input on selected indices based on updates.
.. code-block:: python
:name: code-example1
import paddle
#input:
x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
# shape of updates should be the same as x
# shape of updates with dim > 1 should be the same as input
updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
overwrite = False
# calculation:
if not overwrite:
for i in range(len(index)):
x[index[i]] = paddle.zeros([2])
for i in range(len(index)):
if (overwrite):
x[index[i]] = updates[i]
else:
x[index[i]] += updates[i]
# output:
out = paddle.to_tensor([[3, 3], [6, 6], [1, 1]])
out.shape # [3, 2]
**NOTICE**: The order in which updates are applied is nondeterministic,
so the output will be nondeterministic if index contains duplicates.
Args:
x (Tensor): The input N-D Tensor with ndim>=1. Data type can be float32, float64.
index (Tensor): The index is a 1-D or 0-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates's length, and the value in index cannot exceed input's length.
updates (Tensor): Update input with updates parameter based on index. When the index is a 1-D tensor, the updates shape should be the same as input, and dim value with dim > 1 should be the same as input. When the index is a 0-D tensor, the updates should be an (N-1)-D tensor, and the ith dim of the updates should be equal to the (i+1)th dim of the input.
overwrite (bool, optional): The mode that updating the output when there are same indices.
If True, use the overwrite mode to update the output of the same index,
if False, use the accumulate mode to update the output of the same index. Default value is True.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor, The output is a Tensor with the same shape as x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
output1 = paddle.scatter(x, index, updates, overwrite=False)
# [[3., 3.],
# [6., 6.],
# [1., 1.]]
output2 = paddle.scatter(x, index, updates, overwrite=True)
# CPU device:
# [[3., 3.],
# [4., 4.],
# [1., 1.]]
# GPU device maybe have two results because of the repeated numbers in index
# result 1:
# [[3., 3.],
# [4., 4.],
# [1., 1.]]
# result 2:
# [[3., 3.],
# [2., 2.],
# [1., 1.]]
"""
if in_dynamic_mode():
return _C_ops.scatter(x, index, updates, overwrite)
else:
check_variable_and_dtype(
x,
'dtype',
['float32', 'float64', 'float16', 'int32', 'int64', 'uint16'],
'scatter',
)
check_type(overwrite, 'overwrite', bool, 'scatter')
helper = LayerHelper('scatter', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type="scatter",
inputs={"X": x, "Ids": index, "Updates": updates},
attrs={'overwrite': overwrite},
outputs={"Out": out},
)
return out
@inplace_apis_in_dygraph_only
def scatter_(x, index, updates, overwrite=True, name=None):
"""
Inplace version of ``scatter`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_scatter`.
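A minimal usage sketch (dygraph mode only; it mirrors the accumulate-mode example of
``paddle.scatter`` above and assumes the in-place API is exported as ``paddle.scatter_``):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
paddle.scatter_(x, index, updates, overwrite=False)
print(x)
# [[3., 3.],
#  [6., 6.],
#  [1., 1.]]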
"""
return _C_ops.scatter_(x, index, updates, overwrite)
def scatter_nd_add(x, index, updates, name=None):
r"""
Output is obtained by applying sparse addition to a single value
or slice in a Tensor.
:attr:`x` is a Tensor with ndim :math:`R`
and :attr:`index` is a Tensor with ndim :math:`K` . Thus, :attr:`index`
has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates`
is a Tensor with ndim :math:`K - 1 + R - Q` and its
shape is :math:`index.shape[:-1] + x.shape[index.shape[-1]:]` .
According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,
add the corresponding :attr:`updates` slice to the :attr:`x` slice
which is obtained by the last one dimension of :attr:`index` .
.. code-block:: text
Given:
* Case 1:
x = [0, 1, 2, 3, 4, 5]
index = [[1], [2], [3], [1]]
updates = [9, 10, 11, 12]
we get:
output = [0, 22, 12, 14, 4, 5]
* Case 2:
x = [[65, 17], [-14, -25]]
index = [[], []]
updates = [[[-1, -2], [1, 2]],
[[3, 4], [-3, -4]]]
x.shape = (2, 2)
index.shape = (2, 0)
updates.shape = (2, 2, 2)
we get:
output = [[67, 19], [-16, -27]]
Args:
x (Tensor): The x input. Its dtype should be int32, int64, float32, float64.
index (Tensor): The index input with ndim > 1 and index.shape[-1] <= x.ndim.
Its dtype should be int32 or int64 as it is used as indexes.
updates (Tensor): The updated value of scatter_nd_add op, and it must have the same dtype
as x. It must have the shape index.shape[:-1] + x.shape[index.shape[-1]:].
name (str|None): The output tensor name. If set None, the layer will be named automatically.
Returns:
output (Tensor), The output is a tensor with the same shape and dtype as x.
Examples:
.. code-block:: python
import paddle
x = paddle.rand(shape=[3, 5, 9, 10], dtype='float32')
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
index = paddle.to_tensor([[1, 1],
[0, 1],
[1, 3]], dtype='int64')
output = paddle.scatter_nd_add(x, index, updates)
print(output.shape)
# [3, 5, 9, 10]
"""
if in_dynamic_mode():
return _C_ops.scatter_nd_add(x, index, updates)
else:
if x.dtype != updates.dtype:
raise ValueError("x and updates must have same data type.")
helper = LayerHelper('scatter_nd_add', **locals())
dtype = helper.input_dtype(input_param_name='x')
output = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="scatter_nd_add",
inputs={"X": x, "Index": index, "Updates": updates},
outputs={"Out": output},
)
return output
def scatter_nd(index, updates, shape, name=None):
"""
**Scatter_nd Layer**
Output is obtained by scattering the :attr:`updates` in a new tensor according
to :attr:`index` . This op is similar to :code:`scatter_nd_add`, except the
tensor of :attr:`shape` is zero-initialized. Correspondingly, :code:`scatter_nd(index, updates, shape)`
is equal to :code:`scatter_nd_add(paddle.zeros(shape, updates.dtype), index, updates)` .
If :attr:`index` has repeated elements, then the corresponding updates are accumulated.
Because of the numerical approximation issues, the different order of repeated elements
in :attr:`index` may cause different results. The specific calculation method can be
seen :code:`scatter_nd_add` . This op is the inverse of the :code:`gather_nd` op.
Args:
index (Tensor): The index input with ndim >= 1 and index.shape[-1] <= len(shape).
Its dtype should be int32 or int64 as it is used as indexes.
updates (Tensor): The updated value of scatter_nd op. Its dtype should be float32, float64.
It must have the shape index.shape[:-1] + shape[index.shape[-1]:]
shape(tuple|list): Shape of output tensor.
name (str|None): The output Tensor name. If set None, the layer will be named automatically.
Returns:
output (Tensor), The output is a tensor with the same type as :attr:`updates` .
Examples:
.. code-block:: python
import paddle
index = paddle.to_tensor([[1, 1],
[0, 1],
[1, 3]], dtype="int64")
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
shape = [3, 5, 9, 10]
output = paddle.scatter_nd(index, updates, shape)
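print(output.shape)
# [3, 5, 9, 10]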
"""
return scatter_nd_add(zeros(shape, updates.dtype), index, updates, name)
def chunk(x, chunks, axis=0, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
x (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
chunks(int): The number of tensor to be split along the certain axis.
axis (int|Tensor, optional): The axis along which to split, it can be a integer or a ``0-D Tensor``
with shape [] and data type ``int32`` or ``int64``.
If :math:`axis < 0`, the axis to split along is :math:`rank(x) + axis`. Default is 0.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Tensor), The list of segmented Tensors.
Examples:
.. code-block:: python
import paddle
x = paddle.rand([3, 9, 5])
out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1)
# out0.shape [3, 3, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]
# axis is negative, the real axis is (rank(x) + axis) which real
# value is 1.
out0, out1, out2 = paddle.chunk(x, chunks=3, axis=-2)
# out0.shape [3, 3, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]
"""
check_type(chunks, 'chunks', (int), 'chunk')
return split(x, num_or_sections=chunks, axis=axis, name=name)
def tile(x, repeat_times, name=None):
"""
Construct a new Tensor by repeating ``x`` the number of times given by ``repeat_times``.
After tiling, the value of the i'th dimension of the output is equal to ``x.shape[i]*repeat_times[i]``.
Both the number of dimensions of ``x`` and the number of elements in ``repeat_times`` should be less than or equal to 6.
Args:
x (Tensor): The input tensor, its data type should be bool, float16, float32, float64, int32 or int64.
repeat_times (list|tuple|Tensor): The number of repeating times. If repeat_times is a list or tuple, all its elements
should be integers or 1-D Tensors with the data type int32. If repeat_times is a Tensor, it should be an 1-D Tensor with the data type int32.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. The data type is the same as ``x``. The size of the i-th dimension is equal to ``x.shape[i] * repeat_times[i]``.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.tile(data, repeat_times=[2, 1])
print(out)
# Tensor(shape=[2, 3], dtype=int32, place=Place(gpu:0), stop_gradient=True,
# [[1, 2, 3],
# [1, 2, 3]])
out = paddle.tile(data, repeat_times=(2, 2))
print(out)
# Tensor(shape=[2, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True,
# [[1, 2, 3, 1, 2, 3],
# [1, 2, 3, 1, 2, 3]])
repeat_times = paddle.to_tensor([1, 2], dtype='int32')
out = paddle.tile(data, repeat_times=repeat_times)
print(out)
# Tensor(shape=[1, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True,
# [[1, 2, 3, 1, 2, 3]])
"""
if in_dynamic_mode():
if isinstance(repeat_times, core.eager.Tensor):
assert (
repeat_times.ndim == 1
), "Only support ndim == 1 while repeat_times is a Tensor."
repeat_times = repeat_times.tolist()
return _C_ops.tile(x, repeat_times)
else:
check_type(
repeat_times, 'repeat_times', (list, tuple, Variable), 'tile'
)
if isinstance(repeat_times, Variable):
assert (
repeat_times.numel() == 1
), 'repeat_times must be a Tensor with one element.'
else:
for elem in repeat_times:
if isinstance(elem, Variable):
assert (
elem.numel() == 1
), 'Elements in repeat_times must be Tensor with one element or integers.'
else:
type_tuple = (int, np.int32, np.int64)
assert isinstance(
elem, type_tuple
), 'Elements in repeat_times must be Tensor with one element or integers.'
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'uint16',
'float32',
'float64',
'int32',
'int64',
],
'tile',
)
if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
raise ValueError(
"When the date type is bool for the input 'x' of tile op, you "
"must set its stop_gradient to be True by "
"some_var.stop_gradient == True supporting some_var is the input."
)
helper = LayerHelper('tile', **locals())
inputs = {"X": [x]}
attrs = {}
def get_attr_repeat_times(list_repeat_times):
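# Build the 'repeat_times' attribute: Tensor entries are recorded as -1
# placeholders (their concrete values are fed through 'repeat_times_tensor'),
# while plain integers must be positive and are used directly.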
attrs_repeat_times = []
for idx, times in enumerate(list_repeat_times):
if isinstance(times, Variable):
attrs_repeat_times.append(-1)
else:
attrs_repeat_times.append(times)
assert (
times > 0
), "All elements in repeat_times must be positive for tile."
return attrs_repeat_times
if isinstance(repeat_times, Variable):
repeat_times.stop_gradient = True
inputs['RepeatTimes'] = repeat_times
attrs['repeat_times'] = [-1]
elif isinstance(repeat_times, (list, tuple)):
attrs['repeat_times'] = get_attr_repeat_times(repeat_times)
if paddle.utils._contain_var(repeat_times):
inputs[
'repeat_times_tensor'
] = paddle.utils._convert_to_tensor_list(repeat_times)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs
)
return out
def expand_as(x, y, name=None):
"""
Expand the input tensor ``x`` to the same shape as the input tensor ``y``.
Both the number of dimensions of ``x`` and ``y`` must be less than or equal to 6, and the number of dimensions of ``y`` must be greater than or equal to that of ``x``. The dimension to expand must have a value of 0.
Args:
x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
y (Tensor): The input tensor that gives the shape to expand to.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor, A Tensor with the same shape as ``y``. The data type is the same as ``x``.
Examples:
.. code-block:: python
import paddle
data_x = paddle.to_tensor([1, 2, 3], 'int32')
data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
out = paddle.expand_as(data_x, data_y)
print(out)
# Tensor(shape=[2, 3], dtype=int32, place=Place(gpu:0), stop_gradient=True,
# [[1, 2, 3],
# [1, 2, 3]])
"""
if in_dynamic_mode():
return _C_ops.expand_as(x, None, y.shape)
else:
check_variable_and_dtype(
x,
'x',
[
'bool',
'float32',
'float64',
'int32',
'int64',
'float16',
'uint16',
],
'expand_as',
)
check_type(y, 'y', Variable, 'expand_as')
if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
raise ValueError(
"When the data type of input 'x' for expand_as is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input 'x'."
)
inputs = {"X": [x], "Y": [y]}
helper = LayerHelper('expand_as', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand_as_v2',
inputs=inputs,
attrs={'target_shape': y.shape},
outputs={'Out': out},
)
return out
def broadcast_to(x, shape, name=None):
"""
Broadcast the input tensor to a given shape.
Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to broadcast to must have a value 0.
Args:
x (Tensor): The input tensor, its data type is bool, float16, float32, float64, int32 or int64.
shape (list|tuple|Tensor): The result shape after broadcasting. The data type is int32. If shape is a list or tuple, all its elements
should be integers or 0-D or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32.
The value -1 in shape means keeping the corresponding dimension unchanged.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor, A Tensor with the given shape. The data type is the same as ``x``.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.broadcast_to(data, shape=[2, 3])
print(out)
# [[1, 2, 3], [1, 2, 3]]
"""
if in_dynamic_mode():
return _C_ops.expand(x, shape)
else:
if isinstance(shape, Variable):
assert len(shape.shape) == 1, 'shape must be an 1-D Tensor.'
else:
type_tuple = (int, np.int32, np.int64)
for elem in shape:
if isinstance(elem, Variable):
assert (
len(elem.shape) == 1
), 'Elements in shape must be 1-D Tensors or integers.'
else:
assert isinstance(
elem, type_tuple
), 'Elements in shape must be 1-D Tensors or integers.'
check_variable_and_dtype(
x,
'x',
[
'bool',
'uint16',
'float16',
'float32',
'float64',
'int32',
'int64',
],
'broadcast_to',
)
check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
raise ValueError(
"When the data type of input 'x' for broadcast_to is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input."
)
inputs = {"X": [x]}
attrs = {}
helper = LayerHelper('expand', **locals())
def get_attr_expand_shape(list_expand_shape):
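# Build the 'shape' attribute: Tensor entries become -1 placeholders (their
# concrete values are fed through 'expand_shapes_tensor'); plain integers must
# be positive, or -1 to keep the corresponding input dimension unchanged.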
attrs_expand_shape = []
for idx, shape in enumerate(list_expand_shape):
if isinstance(shape, Variable):
attrs_expand_shape.append(-1)
else:
attrs_expand_shape.append(shape)
assert (
shape > 0 or shape == -1
), "All elements in shape of broadcast_to must be positive or -1."
return attrs_expand_shape
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs['Shape'] = shape
elif isinstance(shape, (list, tuple)):
attrs['shape'] = get_attr_expand_shape(shape)
if paddle.utils._contain_var(shape):
inputs[
'expand_shapes_tensor'
] = paddle.utils._convert_to_tensor_list(shape)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs
)
return out
def expand(x, shape, name=None):
"""
Expand the input tensor to a given shape.
Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6, and the number of dimensions of ``x`` should be less than or equal to the number of elements in ``shape``. The dimension to expand must have a value 0.
Args:
x (Tensor): The input Tensor, its data type is bool, float32, float64, int32 or int64.
shape (list|tuple|Tensor): The result shape after expanding. The data type is int32. If shape is a list or tuple, all its elements
should be integers or 0-D or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32.
The value -1 in shape means keeping the corresponding dimension unchanged.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
N-D Tensor, A Tensor with the given shape. The data type is the same as ``x``.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.expand(data, shape=[2, 3])
print(out)
# [[1, 2, 3], [1, 2, 3]]
"""
if in_dynamic_mode():
return _C_ops.expand(x, shape)
else:
if isinstance(shape, Variable):
assert shape.numel() == 1, 'shape must be a Tensor with one element'
else:
for elem in shape:
if isinstance(elem, Variable):
assert (
elem.numel() == 1
), 'Elements in shape must be Tensor with one element or integers.'
else:
type_tuple = (int, np.int32, np.int64)
assert isinstance(
elem, type_tuple
), 'Elements in shape must be Tensor with one element or integers.'
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'float32',
'float64',
'int32',
'int64',
'uint16',
],
'expand',
)
check_type(shape, 'shape', (list, tuple, Variable), 'expand')
if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
raise ValueError(
"When the data type of input 'x' for expand is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input."
)
inputs = {"X": [x]}
attrs = {}
helper = LayerHelper('expand', **locals())
def get_attr_expand_shape(list_expand_shape):
attrs_expand_shape = []
for idx, shape in enumerate(list_expand_shape):
if isinstance(shape, Variable):
attrs_expand_shape.append(-2)
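                    # -2 records that this element was supplied as a Tensor; its actual
                    # value is provided at run time via the 'expand_shapes_tensor' input
                    # assembled below.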
else:
attrs_expand_shape.append(shape)
assert (
shape > 0 or shape == -1
), "All elements in shape of expand must be positive or -1."
return attrs_expand_shape
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs['Shape'] = shape
elif isinstance(shape, (list, tuple)):
attrs['shape'] = get_attr_expand_shape(shape)
if paddle.utils._contain_var(shape):
inputs[
'expand_shapes_tensor'
] = paddle.utils._convert_to_tensor_list(shape)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs
)
return out
def reshape(x, shape, name=None):
"""
Changes the shape of ``x`` without changing its data.
Note that the output Tensor will share data with origin Tensor and doesn't
have a Tensor copy in ``dygraph`` mode.
If you want to use the Tensor copy version, please use `Tensor.clone` like
``reshape_clone_x = x.reshape([-1]).clone()``.
Some tricks exist when specifying the target shape.
- 1. -1 means the value of this dimension is inferred from the total element number of x and remaining dimensions. Thus one and only one dimension can be set -1.
- 2. 0 means the actual dimension value is going to be copied from the corresponding dimension of x. The index of 0s in shape can not exceed the dimension of x.
Here are some examples to explain it.
    - 1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape is [6, 8], the reshape operator will transform x into a 2-D tensor with shape [6, 8] while leaving x's data unchanged.
    - 2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape specified is [2, 3, -1, 2], the reshape operator will transform x into a 4-D tensor with shape [2, 3, 4, 2] while leaving x's data unchanged. In this case, one dimension of the target shape is set to -1, and its value is inferred from the total element number of x and the remaining dimensions.
    - 3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor with shape [2, 4, 3, 2] while leaving x's data unchanged. In this case, besides -1, 0 means the actual dimension value is going to be copied from the corresponding dimension of x.
Args:
x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``
shape (list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.
            The data type is ``int32``. If ``shape`` is a list or tuple, each element of it should be an integer or a Tensor with shape [].
            If ``shape`` is a Tensor, it should be a 1-D Tensor.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, A reshaped Tensor with the same data type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.rand([2, 4, 6], dtype="float32")
positive_four = paddle.full([1], 4, "int32")
out = paddle.reshape(x, [-1, 0, 3, 2])
print(out)
# the shape is [2,4,3,2].
out = paddle.reshape(x, shape=[positive_four, 12])
print(out)
            # the shape is [4, 12].
shape_tensor = paddle.to_tensor([8, 6], dtype=paddle.int32)
out = paddle.reshape(x, shape=shape_tensor)
print(out.shape)
# the shape is [8, 6].
# out shares data with x in dygraph mode
x[0, 0, 0] = 10.
print(out[0, 0])
# the value is [10.]
"""
if in_dynamic_mode():
if isinstance(shape, (list, tuple)):
new_shape = []
for ele in shape:
if isinstance(ele, core.eager.Tensor):
new_shape.append(ele.item())
else:
new_shape.append(ele)
if new_shape == x.shape:
out = x
else:
out = _C_ops.reshape(x, new_shape)
elif isinstance(shape, core.eager.Tensor):
shape.stop_gradient = True
out = _C_ops.reshape(x, shape)
else:
raise ValueError(
"shape must be an instance of `list`, `tuple` or `Variable`,"
" got '{}.'".format(type(shape))
)
return out
else:
check_variable_and_dtype(
x,
'x',
[
'float16',
'float32',
'float64',
'int16',
'int32',
'int64',
'bool',
'uint16',
],
'reshape',
)
check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one dimension value of 'shape' in reshape can "
"be -1. But received shape[%d] is also -1.\n"
"\n\t# N = x.shape()[2]\t\t# N is an int. "
"(NOT recommend under @to_static)\n\tN = paddle.shape(x)[2]\t\t"
"# N is a Tensor. (Recommend)\n\tz = paddle.reshape([N, -1, 4])"
"\t# z.shape is [-1, -1, 4]\n\n"
" If your target shape in Reshape represents dynamic shape, "
"please turn it into a Tensor under @to_static. See above example for details."
% dim_idx
)
unk_dim_idx = dim_idx
elif dim_size == 0:
assert dim_idx < len(x.shape), (
"The index of 0 in `shape` must be less than "
"the input tensor X's dimensions. "
"But received shape[%d] = 0, X's dimensions = %d."
% (dim_idx, len(x.shape))
)
else:
assert dim_size > 0, (
"Each dimension value of 'shape' in reshape must not "
"be negative except one unknown dimension. "
"But received shape[%d] = %s."
% (dim_idx, str(dim_size))
)
return attrs_shape
inputs = {"X": x}
attrs = {}
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)):
attrs["shape"] = get_attr_shape(shape)
if paddle.utils._contain_var(shape):
inputs['ShapeTensor'] = paddle.utils._convert_to_tensor_list(
shape
)
helper = LayerHelper("reshape2", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="reshape2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out, "XShape": x_shape},
)
return out
@inplace_apis_in_dygraph_only
def reshape_(x, shape, name=None):
"""
Inplace version of ``reshape`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_reshape`.
"""
if in_dynamic_mode():
tmp_tensor_type = core.eager.Tensor
if isinstance(shape, (list, tuple)):
shape = [
item.item(0) if isinstance(item, tmp_tensor_type) else item
for item in shape
]
if shape == x.shape:
out = x
else:
out = _C_ops.reshape_(x, shape)
elif isinstance(shape, tmp_tensor_type):
shape.stop_gradient = True
out = _C_ops.reshape_(x, shape)
else:
raise ValueError(
"shape must be an instance of `list`, `tuple` or `Variable`,"
" got '{}.'".format(type(shape))
)
return out
def gather_nd(x, index, name=None):
"""
This function is actually a high-dimensional extension of :code:`gather`
    and supports simultaneous indexing along multiple axes. :attr:`index` is a
    K-dimensional integer tensor, which is regarded as a (K-1)-dimensional
    tensor of indices into :attr:`input`, where each element defines
    a slice of :attr:`input`:
.. math::
output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]
Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has
shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .
.. code-block:: text
Given:
x = [[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]]
x.shape = (2, 3, 4)
* Case 1:
index = [[1]]
gather_nd(x, index)
= [x[1, :, :]]
= [[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]
* Case 2:
index = [[0,2]]
gather_nd(x, index)
= [x[0, 2, :]]
= [8, 9, 10, 11]
* Case 3:
index = [[1, 2, 3]]
gather_nd(x, index)
= [x[1, 2, 3]]
= [23]
Args:
x (Tensor): The input Tensor which it's data type should be bool, float16, float32, float64, int32, int64.
index (Tensor): The index input with rank > 1, index.shape[-1] <= input.rank.
Its dtype should be int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
output (Tensor), A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:]
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10], [11, 12]]])
index = paddle.to_tensor([[0, 1]])
output = paddle.gather_nd(x, index) #[[3, 4]]
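            # A further illustrative sketch reusing ``x`` from above: when
            # index.shape[-1] equals x.ndim, single elements are gathered.
            index = paddle.to_tensor([[1, 2, 1]])
            output = paddle.gather_nd(x, index) #[12]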
"""
if in_dynamic_mode():
return _C_ops.gather_nd(x, index)
else:
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'uint16',
'float32',
'float64',
'int16',
'int32',
'int64',
],
            'gather_nd',
)
check_variable_and_dtype(
            index, 'index', ['int32', 'int64'], 'gather_nd'
)
helper = LayerHelper('gather_nd', **locals())
dtype = helper.input_dtype()
output = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="gather_nd",
inputs={"X": x, "Index": index},
outputs={"Out": output},
)
return output
def strided_slice(x, axes, starts, ends, strides, name=None):
"""
This operator produces a slice of ``x`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
to slice the input data tensor. If a negative value is passed to
    ``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the
    axis, i.e. the :math:`(i-1)`-th position counting from the end. The ``strides`` represents steps of
slicing and if the ``strides`` is negative, slice operation is in the opposite direction.
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
    to pass in INT_MAX. The sizes of ``axes``, ``starts``, ``ends`` and ``strides`` must be equal.
Following examples will explain how strided_slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
strides = [1, 1]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [2, 0]
strides = [1, -1]
Then:
result = [ [8, 7, 6], ]
Case3:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000]
strides = [1, 3]
Then:
result = [ [2], ]
Args:
x (Tensor): An N-D ``Tensor``. The data type is ``bool``, ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
            It's optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
starts (list|tuple|Tensor): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of it should be
            integers or Tensors with shape []. If ``starts`` is a Tensor, it should be a 1-D Tensor.
It represents starting indices of corresponding axis in ``axes``.
ends (list|tuple|Tensor): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of it should be
            integers or Tensors with shape []. If ``ends`` is a Tensor, it should be a 1-D Tensor.
It represents ending indices of corresponding axis in ``axes``.
strides (list|tuple|Tensor): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of it should be
            integers or Tensors with shape []. If ``strides`` is a Tensor, it should be a 1-D Tensor.
It represents slice step of corresponding axis in ``axes``.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor, A ``Tensor`` with the same dimension as ``x``. The data type is same as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.zeros(shape=[3,4,5,6], dtype="float32")
# example 1:
# attr starts is a list which doesn't contain Tensor.
axes = [1, 2, 3]
starts = [-3, 0, 2]
ends = [3, 2, 4]
strides_1 = [1, 1, 1]
strides_2 = [1, 1, 2]
sliced_1 = paddle.strided_slice(x, axes=axes, starts=starts, ends=ends, strides=strides_1)
# sliced_1 is x[:, 1:3:1, 0:2:1, 2:4:1].
# example 2:
            # attr starts is a list which contains a Tensor.
minus_3 = paddle.full(shape=[1], fill_value=-3, dtype='int32')
sliced_2 = paddle.strided_slice(x, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
# sliced_2 is x[:, 1:3:1, 0:2:1, 2:4:2].
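            # A further illustrative sketch reusing ``x`` from above: a negative stride
            # slices the axis in reverse order.
            sliced_3 = paddle.strided_slice(x, axes=[1], starts=[2], ends=[0], strides=[-1])
            # sliced_3 is x[:, 2:0:-1, :, :].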
"""
if in_dynamic_mode():
return _C_ops.strided_slice(x, axes, starts, ends, strides)
else:
helper = LayerHelper('strided_slice', **locals())
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'uint16',
'float32',
'float64',
'int32',
'int64',
],
'strided_slice',
)
check_type(axes, 'axes', (list, tuple), 'strided_slice')
check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice')
check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice')
check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice')
def check_list_elements_dtype(list_input, input_name):
if isinstance(list_input, Variable):
check_dtype(
list_input.dtype,
input_name,
['int32', 'int64'],
'strided_slice',
)
else:
for i, var in enumerate(list_input):
var_name = input_name + '[' + str(i) + ']'
if isinstance(var, Variable):
check_dtype(
var.dtype, var_name, ['int32'], 'strided_slice'
)
check_list_elements_dtype(axes, 'axes')
check_list_elements_dtype(starts, 'starts')
check_list_elements_dtype(ends, 'ends')
check_list_elements_dtype(strides, 'strides')
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert isinstance(dim, int)
temp_out = helper.create_variable_for_type_inference(
'int32'
)
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out
)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': x}
attrs = {'axes': axes}
infer_flags = [1 for i in range(len(axes))]
# starts
if isinstance(starts, Variable):
starts.stop_gradient = True
inputs['StartsTensor'] = starts
elif isinstance(starts, (list, tuple)):
attrs['starts'] = []
if paddle.utils._contain_var(starts):
inputs['StartsTensorList'] = get_new_list_tensor(starts)
for i, dim in enumerate(starts):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = starts
# ends
if isinstance(ends, Variable):
ends.stop_gradient = True
inputs['EndsTensor'] = ends
elif isinstance(ends, (list, tuple)):
attrs['ends'] = []
if paddle.utils._contain_var(ends):
inputs['EndsTensorList'] = get_new_list_tensor(ends)
for i, dim in enumerate(ends):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = ends
# strides
if isinstance(strides, Variable):
strides.stop_gradient = True
inputs['StridesTensor'] = strides
elif isinstance(strides, (list, tuple)):
attrs['strides'] = []
if paddle.utils._contain_var(strides):
inputs['StridesTensorList'] = get_new_list_tensor(strides)
for i, dim in enumerate(strides):
if isinstance(dim, Variable):
attrs['strides'].append(-1)
infer_flags[i] = -1
else:
attrs['strides'].append(dim)
else:
attrs['strides'] = strides
attrs['infer_flags'] = infer_flags
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x')
)
helper.append_op(
type='strided_slice',
inputs=inputs,
attrs=attrs,
outputs={'Out': out},
)
return out
def tensordot(x, y, axes=2, name=None):
r"""
This function computes a contraction, which sum the product of elements from two tensors along the given axes.
Args:
x (Tensor): The left tensor for contraction with data type ``float16`` or ``float32`` or ``float64``.
y (Tensor): The right tensor for contraction with the same data type as ``x``.
axes (int|tuple|list|Tensor, optional): The axes to contract for ``x`` and ``y``, defaulted to integer ``2``.
1. It could be a non-negative integer ``n``,
in which the function will sum over the last ``n`` axes of ``x`` and the first ``n`` axes of ``y`` in order.
2. It could be a 1-d tuple or list with data type ``int``, in which ``x`` and ``y`` will be contracted along the same given axes.
For example, ``axes`` =[0, 1] applies contraction along the first two axes for ``x`` and the first two axes for ``y``.
3. It could be a tuple or list containing one or two 1-d tuple|list|Tensor with data type ``int``.
                When containing one tuple|list|Tensor, the data in the tuple|list|Tensor specifies the same axes for ``x`` and ``y`` to contract.
When containing two tuple|list|Tensor, the first will be applied to ``x`` and the second to ``y``.
When containing more than two tuple|list|Tensor, only the first two axis sequences will be used while the others will be ignored.
4. It could be a tensor, in which the ``axes`` tensor will be translated to a python list
and applied the same rules described above to determine the contraction axes.
Note that the ``axes`` with Tensor type is ONLY available in Dygraph mode.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
    Returns:
Output (Tensor), The contraction result with the same data type as ``x`` and ``y``.
In general, :math:`output.ndim = x.ndim + y.ndim - 2 \times n_{axes}`, where :math:`n_{axes}` denotes the number of axes to be contracted.
NOTES:
1. This function supports tensor broadcast,
the size in the corresponding dimensions of ``x`` and ``y`` should be equal, or applies to the broadcast rules.
2. This function also supports axes expansion,
when the two given axis sequences for ``x`` and ``y`` are of different lengths,
            the shorter sequence will be extended at the end with the trailing axes of the longer one.
For example, if ``axes`` =[[0, 1, 2, 3], [1, 0]],
the axis sequence for ``x`` is [0, 1, 2, 3],
while the corresponding axis sequences for ``y`` will be expanded from [1, 0] to [1, 0, 2, 3].
Examples:
.. code-block:: python
import paddle
data_type = 'float64'
# For two 2-d tensor x and y, the case axes=0 is equivalent to outer product.
# Note that tensordot supports empty axis sequence, so all the axes=0, axes=[], axes=[[]], and axes=[[],[]] are equivalent cases.
x = paddle.arange(4, dtype=data_type).reshape([2, 2])
y = paddle.arange(4, dtype=data_type).reshape([2, 2])
z = paddle.tensordot(x, y, axes=0)
# z = [[[[0., 0.],
# [0., 0.]],
#
# [[0., 1.],
# [2., 3.]]],
#
#
# [[[0., 2.],
# [4., 6.]],
#
# [[0., 3.],
# [6., 9.]]]]
# For two 1-d tensor x and y, the case axes=1 is equivalent to inner product.
x = paddle.arange(10, dtype=data_type)
y = paddle.arange(10, dtype=data_type)
z1 = paddle.tensordot(x, y, axes=1)
z2 = paddle.dot(x, y)
# z1 = z2 = 285.
# For two 2-d tensor x and y, the case axes=1 is equivalent to matrix multiplication.
x = paddle.arange(6, dtype=data_type).reshape([2, 3])
y = paddle.arange(12, dtype=data_type).reshape([3, 4])
z1 = paddle.tensordot(x, y, axes=1)
z2 = paddle.matmul(x, y)
# z1 = z2 = [[20., 23., 26., 29.],
# [56., 68., 80., 92.]]
# When axes is a 1-d int list, x and y will be contracted along the same given axes.
# Note that axes=[1, 2] is equivalent to axes=[[1, 2]], axes=[[1, 2], []], axes=[[1, 2], [1]], and axes=[[1, 2], [1, 2]].
x = paddle.arange(24, dtype=data_type).reshape([2, 3, 4])
y = paddle.arange(36, dtype=data_type).reshape([3, 3, 4])
z = paddle.tensordot(x, y, axes=[1, 2])
# z = [[506. , 1298., 2090.],
# [1298., 3818., 6338.]]
# When axes is a list containing two 1-d int list, the first will be applied to x and the second to y.
x = paddle.arange(60, dtype=data_type).reshape([3, 4, 5])
y = paddle.arange(24, dtype=data_type).reshape([4, 3, 2])
z = paddle.tensordot(x, y, axes=([1, 0], [0, 1]))
# z = [[4400., 4730.],
# [4532., 4874.],
# [4664., 5018.],
# [4796., 5162.],
# [4928., 5306.]]
# Thanks to the support of axes expansion, axes=[[0, 1, 3, 4], [1, 0, 3, 4]] can be abbreviated as axes= [[0, 1, 3, 4], [1, 0]].
x = paddle.arange(720, dtype=data_type).reshape([2, 3, 4, 5, 6])
y = paddle.arange(720, dtype=data_type).reshape([3, 2, 4, 5, 6])
z = paddle.tensordot(x, y, axes=[[0, 1, 3, 4], [1, 0]])
# z = [[23217330., 24915630., 26613930., 28312230.],
# [24915630., 26775930., 28636230., 30496530.],
# [26613930., 28636230., 30658530., 32680830.],
# [28312230., 30496530., 32680830., 34865130.]]
"""
op_type = 'tensordot'
input_dtype = ['float16', 'float32', 'float64']
check_variable_and_dtype(x, 'x', input_dtype, op_type)
check_variable_and_dtype(y, 'y', input_dtype, op_type)
check_type(axes, 'axes', (int, tuple, list, Variable), op_type)
def _var_to_list(var):
if in_dynamic_mode():
return tolist(var)
raise TypeError(
"The 'axes' with type 'Tensor' in "
+ op_type
+ " is not available in static graph mode, "
"please convert its type to int|Tuple|List, or use dynamic graph mode."
)
axes_x = []
axes_y = []
if np.issubdtype(type(axes), np.integer):
assert axes >= 0, (
"The 'axes' in "
+ op_type
+ f" should not be negative, but received axes={axes}."
)
axes_x = range(x.ndim - axes, x.ndim)
axes_y = range(axes)
else:
if isinstance(axes, Variable):
axes = _var_to_list(axes)
if not axes or np.issubdtype(type(axes[0]), np.integer):
axes_x = axes
else:
axes_x = axes[0]
if len(axes) > 1:
axes_y = axes[1]
if isinstance(axes_x, Variable):
axes_x = _var_to_list(axes_x)
if isinstance(axes_y, Variable):
axes_y = _var_to_list(axes_y)
axes_x, axes_y = list(axes_x), list(axes_y)
len_axes_x, len_axes_y = len(axes_x), len(axes_y)
if len_axes_x < len_axes_y:
axes_x.extend(axes_y[len_axes_x:])
elif len_axes_y < len_axes_x:
axes_y.extend(axes_x[len_axes_y:])
shape_x, shape_y = list(x.shape), list(y.shape)
need_contracted_dim_x = np.zeros((x.ndim), dtype=bool)
need_contracted_dim_y = np.zeros((y.ndim), dtype=bool)
contraction_size = 1
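    # For each contracted axis pair: if one operand has size 1 along it, the other operand
    # is summed over its paired axis (kept as size 1), which realises the broadcasted
    # contraction; otherwise the two sizes must match exactly.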
for i in range(len(axes_x)):
dim_x, dim_y = axes_x[i], axes_y[i]
sx, sy = shape_x[dim_x], shape_y[dim_y]
if sx == 1:
shape_y[dim_y] = 1
y = y.sum(dim_y).reshape(shape_y)
elif sy == 1:
shape_x[dim_x] = 1
x = x.sum(dim_x).reshape(shape_x)
else:
assert sx == sy, (
"The dimensional size for 'x' and 'y' in "
+ op_type
+ f" should match each other, but 'x' has size {sx} in dim {dim_x} while 'y' has size {sy} in dim {dim_y}."
)
need_contracted_dim_x[dim_x] = True
need_contracted_dim_y[dim_y] = True
contraction_size *= shape_x[dim_x]
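    # Permute x so its non-contracted axes come first, and y so its contracted axes come
    # first; after flattening both to 2-D, a single matmul performs the contraction and
    # the result is reshaped to the output shape.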
perm_x = []
perm_y = []
shape_out = []
not_contraction_size_x = 1
not_contraction_size_y = 1
for i in range(x.ndim):
if not need_contracted_dim_x[i]:
perm_x.append(i)
shape_out.append(shape_x[i])
not_contraction_size_x *= shape_x[i]
perm_x.extend(axes_x)
perm_y.extend(axes_y)
for i in range(y.ndim):
if not need_contracted_dim_y[i]:
perm_y.append(i)
shape_out.append(shape_y[i])
not_contraction_size_y *= shape_y[i]
x = x.transpose(perm=perm_x).reshape(
[not_contraction_size_x, contraction_size]
)
y = y.transpose(perm=perm_y).reshape(
[contraction_size, not_contraction_size_y]
)
out = x.matmul(y).reshape(shape_out)
return out
def as_complex(x, name=None):
"""Transform a real tensor to a complex tensor.
The data type of the input tensor is 'float32' or 'float64', and the data
type of the returned tensor is 'complex64' or 'complex128', respectively.
    The shape of the input tensor is ``(*, 2)``, (``*`` means arbitrary shape), i.e.
    the size of the last axis should be 2, which represents the real and imaginary parts
    of a complex number. The shape of the returned tensor is ``(*,)``.
Args:
x (Tensor): The input tensor. Data type is 'float32' or 'float64'.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, The output. Data type is 'complex64' or 'complex128', with the same precision as the input.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
y = paddle.as_complex(x)
print(y)
# Tensor(shape=[2, 3], dtype=complex64, place=Place(gpu:0), stop_gradient=True,
# [[1j , (2+3j) , (4+5j) ],
# [(6+7j) , (8+9j) , (10+11j)]])
"""
if in_dynamic_mode():
return _C_ops.as_complex(x)
else:
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'as_complex')
op_type = "as_complex"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
out = helper.create_variable_for_type_inference(
dtype=_real_to_complex_dtype(x.dtype)
)
outputs = {"Out": out}
attrs = {}
helper.append_op(
type=op_type, inputs=inputs, attrs=attrs, outputs=outputs
)
return out
def as_real(x, name=None):
"""Transform a complex tensor to a real tensor.
The data type of the input tensor is 'complex64' or 'complex128', and the data
type of the returned tensor is 'float32' or 'float64', respectively.
    When the shape of the input tensor is ``(*, )``, (``*`` means arbitrary shape),
the shape of the output tensor is ``(*, 2)``, i.e. the shape of the output is
the shape of the input appended by an extra ``2``.
Args:
x (Tensor): The input tensor. Data type is 'complex64' or 'complex128'.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, The output. Data type is 'float32' or 'float64', with the same precision as the input.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
y = paddle.as_complex(x)
z = paddle.as_real(y)
print(z)
# Tensor(shape=[2, 3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[0. , 1. ],
# [2. , 3. ],
# [4. , 5. ]],
# [[6. , 7. ],
# [8. , 9. ],
# [10., 11.]]])
"""
if in_dynamic_mode():
return _C_ops.as_real(x)
else:
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'as_real')
op_type = "as_real"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(x.dtype)
)
outputs = {"Out": out}
helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
return out
def repeat_interleave(x, repeats, axis=None, name=None):
"""
Returns a new tensor which repeats the ``x`` tensor along dimension ``axis`` using
    the entries in ``repeats``, which is an int or a Tensor.
Args:
x (Tensor): The input Tensor to be operated. The data of ``x`` can be one of float32, float64, int32, int64.
repeats (Tensor or int): The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis.
axis (int, optional): The dimension in which we manipulate. Default: None, the output tensor is flatten.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor, A Tensor with same data type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
repeats = paddle.to_tensor([3, 2, 1], dtype='int32')
paddle.repeat_interleave(x, repeats, 1)
# [[1, 1, 1, 2, 2, 3],
# [4, 4, 4, 5, 5, 6]]
paddle.repeat_interleave(x, 2, 0)
# [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]
paddle.repeat_interleave(x, 2, None)
# [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]
"""
if axis is None:
x = paddle.flatten(x)
axis = 0
if in_dynamic_mode():
if isinstance(repeats, Variable):
return _C_ops.repeat_interleave_with_tensor_index(x, repeats, axis)
return _C_ops.repeat_interleave(x, repeats, axis)
helper = LayerHelper("repeat_interleave", **locals())
check_variable_and_dtype(
x,
'x',
['float32', 'float64', 'int32', 'int64'],
'paddle.tensor.manipulation.repeat_interleave',
)
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='repeat_interleave',
inputs={
'X': x,
'RepeatsTensor': repeats if isinstance(repeats, Variable) else None,
},
outputs={'Out': out},
attrs={
'dim': axis,
'Repeats': repeats if isinstance(repeats, int) else 0,
},
)
return out
def moveaxis(x, source, destination, name=None):
"""
Move the axis of tensor from ``source`` position to ``destination`` position.
    Other axes that have not been moved remain in their original order.
Args:
x (Tensor): The input Tensor. It is a N-D Tensor of data types bool, int32, int64, float32, float64, complex64, complex128.
        source(int|tuple|list): ``source`` position of axis that will be moved. Each element must be a unique integer.
        destination(int|tuple|list(int)): ``destination`` position of axis that has been moved. Each element must be a unique integer.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, A new tensor whose axis have been moved.
Examples:
.. code-block:: python
import paddle
x = paddle.ones([3, 2, 4])
paddle.moveaxis(x, [0, 1], [1, 2]).shape
# [4, 3, 2]
x = paddle.ones([2, 3])
paddle.moveaxis(x, 0, 1).shape # equivalent to paddle.t(x)
# [3, 2]
"""
src = [source] if isinstance(source, int) else source
dst = [destination] if isinstance(destination, int) else destination
assert len(src) == len(
dst
), "'source' must have the same number with 'destination'"
if len(src) != len(set(src)):
raise ValueError("Each elemment of 'source' must be unique!")
if len(dst) != len(set(dst)):
raise ValueError("Each elemment of 'destination' must be unique!")
ndim = len(x.shape)
# perm is the new order after move axis
perm = list(range(ndim))
src_dims = list(range(ndim))
dst_dims = list(range(ndim))
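    # For each explicitly moved axis, place its source index into the destination slot of
    # perm; the axes that were not moved keep their original relative order and fill the
    # remaining slots afterwards.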
for i, axis in enumerate(zip(src, dst)):
assert isinstance(
axis[0], int
), "Each elemment of 'source' must be integer."
if axis[0] < 0:
assert (
axis[0] >= -ndim
), "'source' must be in the range of [-{0}, {0})".format(ndim)
src[i] += ndim
else:
assert (
axis[0] < ndim
), "'source' must be in the range of [-{0}, {0})".format(ndim)
assert isinstance(
axis[1], int
), "Each elemment of 'source' must be integer."
if axis[1] < 0:
assert (
axis[1] >= -ndim
), "'source' must be in the range of [-{0}, {0})".format(ndim)
dst[i] += ndim
else:
assert (
axis[1] < ndim
), "'source' must be in the range of [-{0}, {0})".format(ndim)
perm[dst[i]] = src[i]
src_dims.remove(src[i])
dst_dims.remove(dst[i])
for i in range(len(src_dims)):
perm[dst_dims[i]] = src_dims[i]
if in_dynamic_mode():
out = _C_ops.transpose(x, perm)
return out
else:
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'float32',
'float64',
'int32',
'int64',
'complex64',
'complex128',
],
'moveaxis',
)
helper = LayerHelper('moveaxis', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='transpose2',
inputs={'X': [x]},
outputs={'Out': [out], 'XShape': [x_shape]},
attrs={'axis': perm},
)
return out
def non_negative_axis(arr, axis):
ndim = len(arr.shape)
if axis >= 0:
assert (
axis < ndim
), "'axis' must be in the range of [-{0}, {0})".format(ndim)
else:
assert (
axis >= -ndim
), "'axis' must be in the range of [-{0}, {0})".format(ndim)
axis += ndim
return axis
def infer_broadcast_shape(arr, indices, axis):
# This function is used in take/put_along_axis
broadcast_shape_list = list(arr.shape)
broadcast_shape_list[axis] = list(indices.shape)[axis]
broadcast_shape = tuple(broadcast_shape_list)
for i in range(len(arr.shape)):
if arr.shape[i] < indices.shape[i]:
# if indices matrix has larger size than arr matrix, do not broadcast.
return None
return broadcast_shape
def take_along_axis(arr, indices, axis):
"""
Take values from the input array by given indices matrix along the designated axis.
Args:
arr (Tensor) : The input Tensor. Supported data types are float32 and float64.
indices (Tensor) : Indices to take along each 1d slice of arr. This must match the dimension of arr,
            and need to broadcast against arr. Supported data types are int32 and int64.
axis (int) : The axis to take 1d slices along.
Returns:
        Tensor, The indexed elements, with the same dtype as arr.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7,8,9]])
index = paddle.to_tensor([[0]])
axis = 0
result = paddle.take_along_axis(x, index, axis)
print(result)
# [[1, 2, 3]]
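            # A further illustrative sketch reusing ``x`` from above: indices are
            # broadcast against arr, so one index per 1d slice can be supplied.
            index = paddle.to_tensor([[0, 1, 2]])
            result = paddle.take_along_axis(x, index, 0)
            print(result)
            # [[1, 5, 9]]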
"""
if len(arr.shape) != len(indices.shape):
raise ValueError(
"`indices` and `arr` must have the same number of dimensions!"
)
axis = non_negative_axis(arr, axis)
broadcast_shape = infer_broadcast_shape(arr, indices, axis)
if not broadcast_shape:
# if indices matrix have larger size than arr, arr should broadcast into indices shape.
broadcast_shape = indices.shape
if in_dynamic_mode():
indices = paddle.broadcast_to(indices, broadcast_shape)
broadcast_shape_list = list(broadcast_shape)
broadcast_shape_list[axis] = list(arr.shape)[axis]
broadcast_shape = tuple(broadcast_shape_list)
arr = paddle.broadcast_to(arr, broadcast_shape)
return _C_ops.take_along_axis(arr, indices, axis)
else:
check_variable_and_dtype(
arr,
'x',
[
'float16',
'float32',
'float64',
'int32',
'int64',
'uint8',
'uint16',
],
'take_along_axis',
)
check_variable_and_dtype(
indices, 'index', ['int32', 'int64'], 'take_along_axis'
)
indices = paddle.broadcast_to(indices, broadcast_shape)
broadcast_shape_list = list(broadcast_shape)
broadcast_shape_list[axis] = list(arr.shape)[axis]
broadcast_shape = tuple(broadcast_shape_list)
arr = paddle.broadcast_to(arr, broadcast_shape)
helper = LayerHelper('take_along_axis', **locals())
dtype = helper.input_dtype()
result = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="take_along_axis",
inputs={"Input": arr, "Index": indices},
attrs={"Axis": axis},
outputs={"Result": result},
)
return result
def put_along_axis(arr, indices, values, axis, reduce='assign'):
"""
Put values into the destination array by given indices matrix along the designated axis.
Args:
arr (Tensor) : The Destination Tensor. Supported data types are float32 and float64.
indices (Tensor) : Indices to put along each 1d slice of arr. This must match the dimension of arr,
            and need to broadcast against arr. Supported data types are int32 and int64.
        values (Tensor|int|float) : The value element(s) to put into arr.
axis (int) : The axis to put 1d slices along.
        reduce (str, optional): The reduce operation, default is 'assign', supporting 'add', 'assign', 'mul' and 'multiply'.
Returns:
        Tensor, The Tensor after the values have been put along the axis, same dtype as arr.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[10, 30, 20], [60, 40, 50]])
index = paddle.to_tensor([[0]])
value = 99
axis = 0
result = paddle.put_along_axis(x, index, value, axis)
print(result)
# [[99, 99, 99],
# [60, 40, 50]]
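            # A further illustrative sketch reusing the tensors from above:
            # reduce='add' accumulates the values instead of assigning them.
            result = paddle.put_along_axis(x, index, value, axis, reduce='add')
            print(result)
            # [[109, 129, 119],
            # [60, 40, 50]]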
"""
if len(arr.shape) != len(indices.shape):
raise ValueError(
"`indices` and `arr` must have the same number of dimensions!"
)
axis = non_negative_axis(arr, axis)
broadcast_shape = infer_broadcast_shape(arr, indices, axis)
if in_dynamic_mode():
values = (
paddle.to_tensor(values)
if not isinstance(values, paddle.Tensor)
else values
)
if broadcast_shape:
indices = paddle.broadcast_to(indices, broadcast_shape)
values = paddle.broadcast_to(values, indices.shape)
return _C_ops.put_along_axis(arr, indices, values, axis, reduce)
else:
check_variable_and_dtype(
arr,
'x',
[
'float16',
'float32',
'float64',
'int32',
'int64',
'uint8',
'uint16',
],
'put_along_axis',
)
check_variable_and_dtype(
indices, 'index', ['int32', 'int64'], 'put_along_axis'
)
if broadcast_shape:
indices = paddle.broadcast_to(indices, broadcast_shape)
values = paddle.broadcast_to(values, indices.shape)
helper = LayerHelper('put_along_axis', **locals())
dtype = helper.input_dtype()
result = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="put_along_axis",
inputs={"Input": arr, "Index": indices, "Value": values},
attrs={"Axis": axis, "Reduce": reduce},
outputs={"Result": result},
)
return result
@inplace_apis_in_dygraph_only
def put_along_axis_(arr, indices, values, axis, reduce='assign'):
r"""
Inplace version of ``put_along_axis`` API, the output Tensor will be inplaced with input ``arr``.
Please refer to :ref:`api_tensor_put_along_axis`.
"""
if len(arr.shape) != len(indices.shape):
raise ValueError(
"`indices` and `arr` must have the same number of dimensions!"
)
axis = non_negative_axis(arr, axis)
broadcast_shape = infer_broadcast_shape(arr, indices, axis)
values = (
paddle.to_tensor(values)
if not isinstance(values, paddle.Tensor)
else values
)
if broadcast_shape:
indices = paddle.broadcast_to(indices, broadcast_shape)
values = paddle.broadcast_to(values, indices.shape)
return _C_ops.put_along_axis_(arr, indices, values, axis, reduce)
def index_add(x, index, axis, value, name=None):
"""
    Adds the elements of the ``value`` tensor to the input tensor ``x`` along ``axis``, at the positions selected by ``index``.
Args:
x (Tensor) : The Destination Tensor. Supported data types are int32, int64, float16, float32, float64.
index (Tensor): The 1-D Tensor containing the indices to index.
The data type of ``index`` must be int32 or int64.
axis (int): The dimension in which we index.
value (Tensor): The tensor used to add the elements along the target axis.
name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
        Tensor, same dimension and dtype as x.
Examples:
.. code-block:: python
# required: gpu
import paddle
input_tensor = paddle.to_tensor(paddle.ones((3, 3)), dtype="float32")
index = paddle.to_tensor([0, 2], dtype="int32")
value = paddle.to_tensor([[1, 1, 1], [1, 1, 1]], dtype="float32")
outplace_res = paddle.index_add(input_tensor, index, 0, value)
print(outplace_res)
# Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[2., 2., 2.],
# [1., 1., 1.],
# [2., 2., 2.]])
"""
if in_dynamic_mode():
return _C_ops.index_add(x, index, value, axis)
helper = LayerHelper("index_add", **locals())
check_variable_and_dtype(
x,
'x',
['float16', 'float32', 'float64', 'int32', 'int64', 'uint16'],
'paddle.tensor.manipulation.index_add',
)
check_variable_and_dtype(
index,
'index',
['int32', 'int64'],
'paddle.tensor.manipulation.index_add',
)
check_variable_and_dtype(
value,
'add_value',
['float16', 'float32', 'float64', 'int32', 'int64', 'uint16'],
'paddle.tensor.manipulation.index_add',
)
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='index_add',
inputs={
'X': x,
'Index': index,
'AddValue': value,
},
outputs={'Out': out},
attrs={'axis': axis},
)
return out
@inplace_apis_in_dygraph_only
def index_add_(x, index, axis, value, name=None):
"""
Inplace version of ``index_add`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_index_add`.
Examples:
.. code-block:: python
# required: gpu
import paddle
input_tensor = paddle.to_tensor(paddle.ones((3, 3)), dtype="float32")
index = paddle.to_tensor([0, 2], dtype="int32")
value = paddle.to_tensor([[1, 1], [1, 1], [1, 1]], dtype="float32")
inplace_res = paddle.index_add_(input_tensor, index, 1, value)
print(inplace_res)
# Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[2., 1., 2.],
# [2., 1., 2.],
# [2., 1., 2.]])
"""
return _C_ops.index_add_(x, index, value, axis)
@inplace_apis_in_dygraph_only
def index_put_(x, indices, value, accumulate=False, name=None):
"""
Puts values from the tensor values into the tensor x using the indices specified in indices (which is a tuple of Tensors).
    The expression paddle.index_put_(x, indices, values) is equivalent to x[indices] = values. Returns x.
If accumulate is True, the elements in values are added to x. If accumulate is False, the behavior is undefined if indices contain duplicate elements.
Args:
x (Tensor) : The Source Tensor. Supported data types are int32, int64, float16, float32, float64, bool.
indices (Tuple of Tensor): The tuple of Tensor containing the indices to index.
The data type of ``tensor in indices`` must be int32, int64 or bool.
value (Tensor): The tensor used to be assigned to x.
        accumulate (bool, optional): Whether the elements in values are added to x. Default: False.
name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
        Tensor, same dimension and dtype as x.
Examples:
.. code-block:: python
import paddle
x = paddle.zeros([3, 3])
value = paddle.ones([3])
ix1 = paddle.to_tensor([0,1,2])
ix2 = paddle.to_tensor([1,2,1])
indices=(ix1,ix2)
out = paddle.index_put_(x,indices,value)
print(x)
# Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0., 1., 0.],
# [0., 0., 1.],
# [0., 1., 0.]])
print(out)
# Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0., 1., 0.],
# [0., 0., 1.],
# [0., 1., 0.]])
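            # A further illustrative sketch continuing from above: with accumulate=True
            # the values are added to the current contents of x instead of overwriting.
            out = paddle.index_put_(x, indices, value, accumulate=True)
            print(out)
            # Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
            # [[0., 2., 0.],
            # [0., 0., 2.],
            # [0., 2., 0.]])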
"""
return _C_ops.index_put_(x, indices, value, accumulate)
def index_put(x, indices, value, accumulate=False, name=None):
"""
    Outplace version of ``index_put_`` API; the result is returned as a new Tensor and ``x`` is left unchanged.
Please refer to :ref:`api_paddle_index_put`.
Examples:
.. code-block:: python
import paddle
x = paddle.zeros([3, 3])
value = paddle.ones([3])
ix1 = paddle.to_tensor([0,1,2])
ix2 = paddle.to_tensor([1,2,1])
indices=(ix1,ix2)
out = paddle.index_put(x,indices,value)
print(x)
# Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0., 0., 0.],
# [0., 0., 0.],
# [0., 0., 0.]])
print(out)
# Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[0., 1., 0.],
# [0., 0., 1.],
# [0., 1., 0.]])
"""
if in_dynamic_mode():
return _C_ops.index_put(x, indices, value, accumulate)
helper = LayerHelper("index_put", **locals())
check_variable_and_dtype(
x,
'x',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'paddle.tensor.manipulation.index_put',
)
check_variable_and_dtype(
value,
'value',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'paddle.tensor.manipulation.index_put',
)
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='index_put',
inputs={
'x': x,
'indices': indices,
'value': value,
},
outputs={'out': out},
attrs={'accumulate': accumulate},
)
return out
def unflatten(x, axis, shape, name=None):
"""
Expand a certain dimension of the input x Tensor into a desired shape.
Args:
x (Tensor) : An N-D Tensor. The data type is float16, float32, float64, int16, int32, int64, bool, uint16.
axis (int): :attr:`axis` to be unflattened, specified as an index into `x.shape`.
shape (list|tuple|Tensor): Unflatten :attr:`shape` on the specified :attr:`axis`. At most one dimension of the target :attr:`shape` can be -1.
If the input :attr:`shape` does not contain -1 , the product of all elements in ``shape`` should be equal to ``x.shape[axis]``.
The data type is `int` . If :attr:`shape` is a list or tuple, the elements of it should be integers or Tensors with shape [].
            If :attr:`shape` is a Tensor, it should be a 1-D Tensor.
name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
Tensor, return the unflatten tensor of :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.randn(shape=[4, 6, 8])
shape = [2, 3]
axis = 1
res = paddle.unflatten(x, axis, shape)
print(res.shape)
# [4, 2, 3, 8]
x = paddle.randn(shape=[4, 6, 8])
shape = (-1, 2)
axis = -1
res = paddle.unflatten(x, axis, shape)
print(res.shape)
# [4, 6, 4, 2]
x = paddle.randn(shape=[4, 6, 8])
shape = paddle.to_tensor([2, 2])
axis = 0
res = paddle.unflatten(x, axis, shape)
print(res.shape)
# [2, 2, 6, 8]
"""
# determine whether the input axis is valid.
axis = non_negative_axis(x, axis)
if isinstance(shape, (list, tuple)):
new_shape = (
list(x.shape[:axis]) + list(shape) + list(x.shape[axis + 1 :])
)
elif isinstance(shape, Variable):
# The data type returned by `paddle.shape` is only 'int32'.
new_shape = paddle.concat(
[
paddle.shape(x)[:axis],
paddle.cast(shape, 'int32'),
paddle.shape(x)[axis + 1 :],
]
)
else:
raise TypeError(
"The data type of x should be one of ['List', 'Tuple', 'Tensor'], but got {}".format(
type(shape)
)
)
x = x.reshape(new_shape)
return x
@dygraph_only
def as_strided(x, shape, stride, offset=0, name=None):
"""
View x with specified shape, stride and offset.
Note that the output Tensor will share data with origin Tensor and doesn't
have a Tensor copy in ``dygraph`` mode.
Args:
x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``
        shape (list|tuple): Define the target shape. Each element of it should be an integer.
        stride (list|tuple): Define the target stride. Each element of it should be an integer.
offset (int): Define the target Tensor's offset from x's holder. Default: 0.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        Tensor, An as_strided Tensor with the same data type as ``x``.
Examples:
.. code-block:: python
import paddle
paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True})
x = paddle.rand([2, 4, 6], dtype="float32")
out = paddle.as_strided(x, [8, 6], [6, 1])
print(out)
# the shape is [8, 6].
# the stride is [6, 1].
"""
return _C_ops.as_strided(x, shape, stride, offset)
@dygraph_only
def view(x, shape_or_dtype, name=None):
"""
View x with specified shape or dtype.
Note that the output Tensor will share data with origin Tensor and doesn't
have a Tensor copy in ``dygraph`` mode.
Args:
x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``
shape_or_dtype (list|tuple|np.dtype|str|VarType): Define the target shape or dtype. If list or tuple, shape_or_dtype represents shape, each element of it should be integer. If np.dtype or str or VarType, shape_or_dtype represents dtype, it can be bool, float16, float32, float64, int8, int32, int64, uint8.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, A viewed Tensor with the same data as ``x``.
Examples:
.. code-block:: python
import paddle
paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True})
x = paddle.rand([2, 4, 6], dtype="float32")
out = paddle.view(x, [8, 6])
print(out)
import paddle
paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True})
x = paddle.rand([2, 4, 6], dtype="float32")
out = paddle.view(x, "uint8")
print(out)
"""
if isinstance(shape_or_dtype, (list, tuple)):
return _C_ops.view_shape(x, shape_or_dtype)
else:
if not isinstance(shape_or_dtype, core.VarDesc.VarType):
shape_or_dtype = convert_np_dtype_to_dtype_(shape_or_dtype)
return _C_ops.view_dtype(x, shape_or_dtype)
@dygraph_only
def view_as(x, other, name=None):
"""
View x with other's shape.
Note that the output Tensor will share data with origin Tensor and doesn't
have a Tensor copy in ``dygraph`` mode.
Args:
x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``
other (Tensor): The result tensor has the same size as other.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, A viewed Tensor with the same shape as ``other``.
Examples:
.. code-block:: python
import paddle
paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True})
x = paddle.rand([2, 4, 6], dtype="float32")
y = paddle.rand([8, 6], dtype="float32")
out = paddle.view_as(x, y)
print(out)
"""
return _C_ops.view_shape(x, other.shape)
@dygraph_only
def unfold(x, axis, size, step, name=None):
"""
    View x with specified shape, stride and offset, containing all slices of size ``size`` from x in the dimension ``axis``.
Note that the output Tensor will share data with origin Tensor and doesn't
have a Tensor copy in ``dygraph`` mode.
Args:
x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``
axis (int): The axis along which the input is unfolded.
size (int): The size of each slice that is unfolded.
step (int): The step between each slice.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        Tensor, An unfolded Tensor with the same data type as ``x``.
Examples:
.. code-block:: python
import paddle
paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True})
x = paddle.arange(9, dtype="float64")
out = paddle.unfold(x, 0, 2, 4)
print(out) # [[0, 1], [4, 5]]
"""
return _C_ops.tensor_unfold(x, axis, size, step)
# TODO(dev): We need avoid implementing it by this way.
__METHODS = {
'fill_': fill_,
'zero_': zero_,
'fill_diagonal_': fill_diagonal_,
'fill_diagonal_tensor_': fill_diagonal_tensor_,
"fill_diagonal_tensor": fill_diagonal_tensor,
'tolist': tolist,
}
for name, func in __METHODS.items():
setattr(core.eager.Tensor, name, func)
|
GHSA-2wcj-qr76-9768
|
Tests/test_imagefont.py
|
@@ -1038,6 +1038,25 @@ def test_render_mono_size():
assert_image_equal_tofile(im, "Tests/images/text_mono.gif")
+def test_too_many_characters(font):
+ with pytest.raises(ValueError):
+ font.getlength("A" * 1000001)
+ with pytest.raises(ValueError):
+ font.getbbox("A" * 1000001)
+ with pytest.raises(ValueError):
+ font.getmask2("A" * 1000001)
+
+ transposed_font = ImageFont.TransposedFont(font)
+ with pytest.raises(ValueError):
+ transposed_font.getlength("A" * 1000001)
+
+ default_font = ImageFont.load_default()
+ with pytest.raises(ValueError):
+ default_font.getlength("A" * 1000001)
+ with pytest.raises(ValueError):
+ default_font.getbbox("A" * 1000001)
+
+
@pytest.mark.parametrize(
"test_file",
[
|
import copy
import os
import re
import shutil
import sys
from io import BytesIO
import pytest
from packaging.version import parse as parse_version
from PIL import Image, ImageDraw, ImageFont, features
from .helper import (
assert_image_equal,
assert_image_equal_tofile,
assert_image_similar_tofile,
is_win32,
skip_unless_feature,
skip_unless_feature_version,
)
FONT_PATH = "Tests/fonts/FreeMono.ttf"
FONT_SIZE = 20
TEST_TEXT = "hey you\nyou are awesome\nthis looks awkward"
pytestmark = skip_unless_feature("freetype2")
def test_sanity():
assert re.search(r"\d+\.\d+\.\d+$", features.version_module("freetype2"))
@pytest.fixture(
scope="module",
params=[
pytest.param(ImageFont.Layout.BASIC),
pytest.param(ImageFont.Layout.RAQM, marks=skip_unless_feature("raqm")),
],
)
def layout_engine(request):
return request.param
@pytest.fixture(scope="module")
def font(layout_engine):
return ImageFont.truetype(FONT_PATH, FONT_SIZE, layout_engine=layout_engine)
def test_font_properties(font):
assert font.path == FONT_PATH
assert font.size == FONT_SIZE
font_copy = font.font_variant()
assert font_copy.path == FONT_PATH
assert font_copy.size == FONT_SIZE
font_copy = font.font_variant(size=FONT_SIZE + 1)
assert font_copy.size == FONT_SIZE + 1
second_font_path = "Tests/fonts/DejaVuSans/DejaVuSans.ttf"
font_copy = font.font_variant(font=second_font_path)
assert font_copy.path == second_font_path
def _render(font, layout_engine):
txt = "Hello World!"
ttf = ImageFont.truetype(font, FONT_SIZE, layout_engine=layout_engine)
ttf.getbbox(txt)
img = Image.new("RGB", (256, 64), "white")
d = ImageDraw.Draw(img)
d.text((10, 10), txt, font=ttf, fill="black")
return img
def test_font_with_name(layout_engine):
_render(FONT_PATH, layout_engine)
def test_font_with_filelike(layout_engine):
def _font_as_bytes():
with open(FONT_PATH, "rb") as f:
font_bytes = BytesIO(f.read())
return font_bytes
ttf = ImageFont.truetype(_font_as_bytes(), FONT_SIZE, layout_engine=layout_engine)
ttf_copy = ttf.font_variant()
assert ttf_copy.font_bytes == ttf.font_bytes
_render(_font_as_bytes(), layout_engine)
# Usage note: making two fonts from the same buffer fails.
# shared_bytes = _font_as_bytes()
# _render(shared_bytes)
# with pytest.raises(Exception):
# _render(shared_bytes)
def test_font_with_open_file(layout_engine):
with open(FONT_PATH, "rb") as f:
_render(f, layout_engine)
def test_render_equal(layout_engine):
img_path = _render(FONT_PATH, layout_engine)
with open(FONT_PATH, "rb") as f:
font_filelike = BytesIO(f.read())
img_filelike = _render(font_filelike, layout_engine)
assert_image_equal(img_path, img_filelike)
def test_non_ascii_path(tmp_path, layout_engine):
tempfile = str(tmp_path / ("temp_" + chr(128) + ".ttf"))
try:
shutil.copy(FONT_PATH, tempfile)
except UnicodeEncodeError:
pytest.skip("Non-ASCII path could not be created")
ImageFont.truetype(tempfile, FONT_SIZE, layout_engine=layout_engine)
def test_transparent_background(font):
im = Image.new(mode="RGBA", size=(300, 100))
draw = ImageDraw.Draw(im)
txt = "Hello World!"
draw.text((10, 10), txt, font=font)
target = "Tests/images/transparent_background_text.png"
assert_image_similar_tofile(im, target, 4.09)
target = "Tests/images/transparent_background_text_L.png"
assert_image_similar_tofile(im.convert("L"), target, 0.01)
def test_I16(font):
im = Image.new(mode="I;16", size=(300, 100))
draw = ImageDraw.Draw(im)
txt = "Hello World!"
draw.text((10, 10), txt, font=font)
target = "Tests/images/transparent_background_text_L.png"
assert_image_similar_tofile(im.convert("L"), target, 0.01)
def test_textbbox_equal(font):
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
txt = "Hello World!"
bbox = draw.textbbox((10, 10), txt, font)
draw.text((10, 10), txt, font=font)
draw.rectangle(bbox)
assert_image_similar_tofile(im, "Tests/images/rectangle_surrounding_text.png", 2.5)
@pytest.mark.parametrize(
"text, mode, fontname, size, length_basic, length_raqm",
(
# basic test
("text", "L", "FreeMono.ttf", 15, 36, 36),
("text", "1", "FreeMono.ttf", 15, 36, 36),
# issue 4177
("rrr", "L", "DejaVuSans/DejaVuSans.ttf", 18, 21, 22.21875),
("rrr", "1", "DejaVuSans/DejaVuSans.ttf", 18, 24, 22.21875),
# test 'l' not including extra margin
# using exact value 2047 / 64 for raqm, checked with debugger
("ill", "L", "OpenSansCondensed-LightItalic.ttf", 63, 33, 31.984375),
("ill", "1", "OpenSansCondensed-LightItalic.ttf", 63, 33, 31.984375),
),
)
def test_getlength(
text, mode, fontname, size, layout_engine, length_basic, length_raqm
):
f = ImageFont.truetype("Tests/fonts/" + fontname, size, layout_engine=layout_engine)
im = Image.new(mode, (1, 1), 0)
d = ImageDraw.Draw(im)
if layout_engine == ImageFont.Layout.BASIC:
length = d.textlength(text, f)
assert length == length_basic
else:
# disable kerning, kerning metrics changed
length = d.textlength(text, f, features=["-kern"])
assert length == length_raqm
def test_float_size(layout_engine):
lengths = []
for size in (48, 48.5, 49):
f = ImageFont.truetype(
"Tests/fonts/NotoSans-Regular.ttf", size, layout_engine=layout_engine
)
lengths.append(f.getlength("text"))
assert lengths[0] != lengths[1] != lengths[2]
def test_render_multiline(font):
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
line_spacing = font.getbbox("A")[3] + 4
lines = TEST_TEXT.split("\n")
y = 0
for line in lines:
draw.text((0, y), line, font=font)
y += line_spacing
# some versions of freetype have different horizontal spacing.
# setting a tight epsilon, I'm showing the original test failure
# at epsilon = ~38.
assert_image_similar_tofile(im, "Tests/images/multiline_text.png", 6.2)
def test_render_multiline_text(font):
# Test that text() correctly connects to multiline_text()
# and that align defaults to left
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
draw.text((0, 0), TEST_TEXT, font=font)
assert_image_similar_tofile(im, "Tests/images/multiline_text.png", 0.01)
# Test that text() can pass on additional arguments
# to multiline_text()
draw.text(
(0, 0), TEST_TEXT, fill=None, font=font, anchor=None, spacing=4, align="left"
)
draw.text((0, 0), TEST_TEXT, None, font, None, 4, "left")
@pytest.mark.parametrize(
"align, ext", (("left", ""), ("center", "_center"), ("right", "_right"))
)
def test_render_multiline_text_align(font, align, ext):
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
draw.multiline_text((0, 0), TEST_TEXT, font=font, align=align)
assert_image_similar_tofile(im, f"Tests/images/multiline_text{ext}.png", 0.01)
def test_unknown_align(font):
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
# Act/Assert
with pytest.raises(ValueError):
draw.multiline_text((0, 0), TEST_TEXT, font=font, align="unknown")
def test_draw_align(font):
im = Image.new("RGB", (300, 100), "white")
draw = ImageDraw.Draw(im)
line = "some text"
draw.text((100, 40), line, (0, 0, 0), font=font, align="left")
def test_multiline_bbox(font):
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
# Test that textbbox() correctly connects to multiline_textbbox()
assert draw.textbbox((0, 0), TEST_TEXT, font=font) == draw.multiline_textbbox(
(0, 0), TEST_TEXT, font=font
)
# Test that multiline_textbbox corresponds to ImageFont.textbbox()
# for single line text
assert font.getbbox("A") == draw.multiline_textbbox((0, 0), "A", font=font)
# Test that textbbox() can pass on additional arguments
# to multiline_textbbox()
draw.textbbox((0, 0), TEST_TEXT, font=font, spacing=4)
def test_multiline_width(font):
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
assert (
draw.textbbox((0, 0), "longest line", font=font)[2]
== draw.multiline_textbbox((0, 0), "longest line\nline", font=font)[2]
)
def test_multiline_spacing(font):
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
draw.multiline_text((0, 0), TEST_TEXT, font=font, spacing=10)
assert_image_similar_tofile(im, "Tests/images/multiline_text_spacing.png", 2.5)
@pytest.mark.parametrize(
"orientation", (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270)
)
def test_rotated_transposed_font(font, orientation):
img_grey = Image.new("L", (100, 100))
draw = ImageDraw.Draw(img_grey)
word = "testing"
transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
# Original font
draw.font = font
bbox_a = draw.textbbox((10, 10), word)
# Rotated font
draw.font = transposed_font
bbox_b = draw.textbbox((20, 20), word)
# Check (w, h) of box a is (h, w) of box b
assert (
bbox_a[2] - bbox_a[0],
bbox_a[3] - bbox_a[1],
) == (
bbox_b[3] - bbox_b[1],
bbox_b[2] - bbox_b[0],
)
# Check top left co-ordinates are correct
assert bbox_b[:2] == (20, 20)
# text length is undefined for vertical text
with pytest.raises(ValueError):
draw.textlength(word)
@pytest.mark.parametrize(
"orientation",
(
None,
Image.Transpose.ROTATE_180,
Image.Transpose.FLIP_LEFT_RIGHT,
Image.Transpose.FLIP_TOP_BOTTOM,
),
)
def test_unrotated_transposed_font(font, orientation):
img_grey = Image.new("L", (100, 100))
draw = ImageDraw.Draw(img_grey)
word = "testing"
transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
# Original font
draw.font = font
bbox_a = draw.textbbox((10, 10), word)
length_a = draw.textlength(word)
# Rotated font
draw.font = transposed_font
bbox_b = draw.textbbox((20, 20), word)
length_b = draw.textlength(word)
# Check boxes a and b are same size
assert (
bbox_a[2] - bbox_a[0],
bbox_a[3] - bbox_a[1],
) == (
bbox_b[2] - bbox_b[0],
bbox_b[3] - bbox_b[1],
)
# Check top left co-ordinates are correct
assert bbox_b[:2] == (20, 20)
assert length_a == length_b
@pytest.mark.parametrize(
"orientation", (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270)
)
def test_rotated_transposed_font_get_mask(font, orientation):
# Arrange
text = "mask this"
transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
# Act
mask = transposed_font.getmask(text)
# Assert
assert mask.size == (13, 108)
@pytest.mark.parametrize(
"orientation",
(
None,
Image.Transpose.ROTATE_180,
Image.Transpose.FLIP_LEFT_RIGHT,
Image.Transpose.FLIP_TOP_BOTTOM,
),
)
def test_unrotated_transposed_font_get_mask(font, orientation):
# Arrange
text = "mask this"
transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
# Act
mask = transposed_font.getmask(text)
# Assert
assert mask.size == (108, 13)
def test_free_type_font_get_name(font):
assert ("FreeMono", "Regular") == font.getname()
def test_free_type_font_get_metrics(font):
ascent, descent = font.getmetrics()
assert isinstance(ascent, int)
assert isinstance(descent, int)
assert (ascent, descent) == (16, 4)
def test_free_type_font_get_mask(font):
# Arrange
text = "mask this"
# Act
mask = font.getmask(text)
# Assert
assert mask.size == (108, 13)
def test_load_path_not_found():
# Arrange
filename = "somefilenamethatdoesntexist.ttf"
# Act/Assert
with pytest.raises(OSError):
ImageFont.load_path(filename)
with pytest.raises(OSError):
ImageFont.truetype(filename)
def test_load_non_font_bytes():
with open("Tests/images/hopper.jpg", "rb") as f:
with pytest.raises(OSError):
ImageFont.truetype(f)
def test_default_font():
# Arrange
txt = 'This is a "better than nothing" default font.'
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)
# Act
default_font = ImageFont.load_default()
draw.text((10, 10), txt, font=default_font)
# Assert
assert_image_equal_tofile(im, "Tests/images/default_font.png")
@pytest.mark.parametrize("mode", (None, "1", "RGBA"))
def test_getbbox(font, mode):
assert (0, 4, 12, 16) == font.getbbox("A", mode)
def test_getbbox_empty(font):
# issue #2614, should not crash.
assert (0, 0, 0, 0) == font.getbbox("")
def test_render_empty(font):
# issue 2666
im = Image.new(mode="RGB", size=(300, 100))
target = im.copy()
draw = ImageDraw.Draw(im)
# should not crash here.
draw.text((10, 10), "", font=font)
assert_image_equal(im, target)
def test_unicode_pilfont():
# should not segfault, should raise UnicodeEncodeError
# issue #2826
font = ImageFont.load_default()
with pytest.raises(UnicodeEncodeError):
font.getbbox("’")
def test_unicode_extended(layout_engine):
# issue #3777
text = "A\u278A\U0001F12B"
target = "Tests/images/unicode_extended.png"
ttf = ImageFont.truetype(
"Tests/fonts/NotoSansSymbols-Regular.ttf",
FONT_SIZE,
layout_engine=layout_engine,
)
img = Image.new("RGB", (100, 60))
d = ImageDraw.Draw(img)
d.text((10, 10), text, font=ttf)
# fails with 14.7
assert_image_similar_tofile(img, target, 6.2)
@pytest.mark.parametrize(
"platform, font_directory",
(("linux", "/usr/local/share/fonts"), ("darwin", "/System/Library/Fonts")),
)
@pytest.mark.skipif(is_win32(), reason="requires Unix or macOS")
def test_find_font(monkeypatch, platform, font_directory):
def _test_fake_loading_font(path_to_fake, fontname):
# Make a copy of FreeTypeFont so we can patch the original
free_type_font = copy.deepcopy(ImageFont.FreeTypeFont)
with monkeypatch.context() as m:
m.setattr(ImageFont, "_FreeTypeFont", free_type_font, raising=False)
def loadable_font(filepath, size, index, encoding, *args, **kwargs):
if filepath == path_to_fake:
return ImageFont._FreeTypeFont(
FONT_PATH, size, index, encoding, *args, **kwargs
)
return ImageFont._FreeTypeFont(
filepath, size, index, encoding, *args, **kwargs
)
m.setattr(ImageFont, "FreeTypeFont", loadable_font)
font = ImageFont.truetype(fontname)
# Make sure it's loaded
name = font.getname()
assert ("FreeMono", "Regular") == name
# A lot of mocking here - this is more for hitting code and
# catching syntax-like errors
monkeypatch.setattr(sys, "platform", platform)
if platform == "linux":
monkeypatch.setenv("XDG_DATA_DIRS", "/usr/share/:/usr/local/share/")
def fake_walker(path):
if path == font_directory:
return [
(
path,
[],
["Arial.ttf", "Single.otf", "Duplicate.otf", "Duplicate.ttf"],
)
]
return [(path, [], ["some_random_font.ttf"])]
monkeypatch.setattr(os, "walk", fake_walker)
# Test that the font loads both with and without the extension
_test_fake_loading_font(font_directory + "/Arial.ttf", "Arial.ttf")
_test_fake_loading_font(font_directory + "/Arial.ttf", "Arial")
# Test that non-ttf fonts can be found without the extension
_test_fake_loading_font(font_directory + "/Single.otf", "Single")
# Test that ttf fonts are preferred if the extension is not specified
_test_fake_loading_font(font_directory + "/Duplicate.ttf", "Duplicate")
def test_imagefont_getters(font):
assert font.getmetrics() == (16, 4)
assert font.font.ascent == 16
assert font.font.descent == 4
assert font.font.height == 20
assert font.font.x_ppem == 20
assert font.font.y_ppem == 20
assert font.font.glyphs == 4177
assert font.getbbox("A") == (0, 4, 12, 16)
assert font.getbbox("AB") == (0, 4, 24, 16)
assert font.getbbox("M") == (0, 4, 12, 16)
assert font.getbbox("y") == (0, 7, 12, 20)
assert font.getbbox("a") == (0, 7, 12, 16)
assert font.getlength("A") == 12
assert font.getlength("AB") == 24
assert font.getlength("M") == 12
assert font.getlength("y") == 12
assert font.getlength("a") == 12
@pytest.mark.parametrize("stroke_width", (0, 2))
def test_getsize_stroke(font, stroke_width):
assert font.getbbox("A", stroke_width=stroke_width) == (
0 - stroke_width,
4 - stroke_width,
12 + stroke_width,
16 + stroke_width,
)
def test_complex_font_settings():
t = ImageFont.truetype(FONT_PATH, FONT_SIZE, layout_engine=ImageFont.Layout.BASIC)
with pytest.raises(KeyError):
t.getmask("абвг", direction="rtl")
with pytest.raises(KeyError):
t.getmask("абвг", features=["-kern"])
with pytest.raises(KeyError):
t.getmask("абвг", language="sr")
def test_variation_get(font):
freetype = parse_version(features.version_module("freetype2"))
if freetype < parse_version("2.9.1"):
with pytest.raises(NotImplementedError):
font.get_variation_names()
with pytest.raises(NotImplementedError):
font.get_variation_axes()
return
with pytest.raises(OSError):
font.get_variation_names()
with pytest.raises(OSError):
font.get_variation_axes()
font = ImageFont.truetype("Tests/fonts/AdobeVFPrototype.ttf")
assert font.get_variation_names() == [
b"ExtraLight",
b"Light",
b"Regular",
b"Semibold",
b"Bold",
b"Black",
b"Black Medium Contrast",
b"Black High Contrast",
b"Default",
]
assert font.get_variation_axes() == [
{"name": b"Weight", "minimum": 200, "maximum": 900, "default": 389},
{"name": b"Contrast", "minimum": 0, "maximum": 100, "default": 0},
]
font = ImageFont.truetype("Tests/fonts/TINY5x3GX.ttf")
assert font.get_variation_names() == [
b"20",
b"40",
b"60",
b"80",
b"100",
b"120",
b"140",
b"160",
b"180",
b"200",
b"220",
b"240",
b"260",
b"280",
b"300",
b"Regular",
]
assert font.get_variation_axes() == [
{"name": b"Size", "minimum": 0, "maximum": 300, "default": 0}
]
def _check_text(font, path, epsilon):
im = Image.new("RGB", (100, 75), "white")
d = ImageDraw.Draw(im)
d.text((10, 10), "Text", font=font, fill="black")
try:
assert_image_similar_tofile(im, path, epsilon)
except AssertionError:
if "_adobe" in path:
path = path.replace("_adobe", "_adobe_older_harfbuzz")
assert_image_similar_tofile(im, path, epsilon)
else:
raise
def test_variation_set_by_name(font):
freetype = parse_version(features.version_module("freetype2"))
if freetype < parse_version("2.9.1"):
with pytest.raises(NotImplementedError):
font.set_variation_by_name("Bold")
return
with pytest.raises(OSError):
font.set_variation_by_name("Bold")
font = ImageFont.truetype("Tests/fonts/AdobeVFPrototype.ttf", 36)
_check_text(font, "Tests/images/variation_adobe.png", 11)
for name in ["Bold", b"Bold"]:
font.set_variation_by_name(name)
assert font.getname()[1] == "Bold"
_check_text(font, "Tests/images/variation_adobe_name.png", 16)
font = ImageFont.truetype("Tests/fonts/TINY5x3GX.ttf", 36)
_check_text(font, "Tests/images/variation_tiny.png", 40)
for name in ["200", b"200"]:
font.set_variation_by_name(name)
assert font.getname()[1] == "200"
_check_text(font, "Tests/images/variation_tiny_name.png", 40)
def test_variation_set_by_axes(font):
freetype = parse_version(features.version_module("freetype2"))
if freetype < parse_version("2.9.1"):
with pytest.raises(NotImplementedError):
font.set_variation_by_axes([100])
return
with pytest.raises(OSError):
font.set_variation_by_axes([500, 50])
font = ImageFont.truetype("Tests/fonts/AdobeVFPrototype.ttf", 36)
font.set_variation_by_axes([500, 50])
_check_text(font, "Tests/images/variation_adobe_axes.png", 11.05)
font = ImageFont.truetype("Tests/fonts/TINY5x3GX.ttf", 36)
font.set_variation_by_axes([100])
_check_text(font, "Tests/images/variation_tiny_axes.png", 32.5)
def test_textbbox_non_freetypefont():
im = Image.new("RGB", (200, 200))
d = ImageDraw.Draw(im)
default_font = ImageFont.load_default()
assert d.textlength("test", font=default_font) == 24
assert d.textbbox((0, 0), "test", font=default_font) == (0, 0, 24, 11)
@pytest.mark.parametrize(
"anchor, left, top",
(
# test horizontal anchors
("ls", 0, -36),
("ms", -64, -36),
("rs", -128, -36),
# test vertical anchors
("ma", -64, 16),
("mt", -64, 0),
("mm", -64, -17),
("mb", -64, -44),
("md", -64, -51),
),
ids=("ls", "ms", "rs", "ma", "mt", "mm", "mb", "md"),
)
def test_anchor(layout_engine, anchor, left, top):
name, text = "quick", "Quick"
path = f"Tests/images/test_anchor_{name}_{anchor}.png"
if layout_engine == ImageFont.Layout.RAQM:
width, height = (129, 44)
else:
width, height = (128, 44)
bbox_expected = (left, top, left + width, top + height)
f = ImageFont.truetype(
"Tests/fonts/NotoSans-Regular.ttf", 48, layout_engine=layout_engine
)
im = Image.new("RGB", (200, 200), "white")
d = ImageDraw.Draw(im)
d.line(((0, 100), (200, 100)), "gray")
d.line(((100, 0), (100, 200)), "gray")
d.text((100, 100), text, fill="black", anchor=anchor, font=f)
assert d.textbbox((0, 0), text, f, anchor=anchor) == bbox_expected
assert_image_similar_tofile(im, path, 7)
@pytest.mark.parametrize(
"anchor, align",
(
# test horizontal anchors
("lm", "left"),
("lm", "center"),
("lm", "right"),
("mm", "left"),
("mm", "center"),
("mm", "right"),
("rm", "left"),
("rm", "center"),
("rm", "right"),
# test vertical anchors
("ma", "center"),
# ("mm", "center"), # duplicate
("md", "center"),
),
)
def test_anchor_multiline(layout_engine, anchor, align):
target = f"Tests/images/test_anchor_multiline_{anchor}_{align}.png"
text = "a\nlong\ntext sample"
f = ImageFont.truetype(
"Tests/fonts/NotoSans-Regular.ttf", 48, layout_engine=layout_engine
)
# test render
im = Image.new("RGB", (600, 400), "white")
d = ImageDraw.Draw(im)
d.line(((0, 200), (600, 200)), "gray")
d.line(((300, 0), (300, 400)), "gray")
d.multiline_text((300, 200), text, fill="black", anchor=anchor, font=f, align=align)
assert_image_similar_tofile(im, target, 4)
def test_anchor_invalid(font):
im = Image.new("RGB", (100, 100), "white")
d = ImageDraw.Draw(im)
d.font = font
for anchor in ["", "l", "a", "lax", "sa", "xa", "lx"]:
with pytest.raises(ValueError):
font.getmask2("hello", anchor=anchor)
with pytest.raises(ValueError):
font.getbbox("hello", anchor=anchor)
with pytest.raises(ValueError):
d.text((0, 0), "hello", anchor=anchor)
with pytest.raises(ValueError):
d.textbbox((0, 0), "hello", anchor=anchor)
with pytest.raises(ValueError):
d.multiline_text((0, 0), "foo\nbar", anchor=anchor)
with pytest.raises(ValueError):
d.multiline_textbbox((0, 0), "foo\nbar", anchor=anchor)
for anchor in ["lt", "lb"]:
with pytest.raises(ValueError):
d.multiline_text((0, 0), "foo\nbar", anchor=anchor)
with pytest.raises(ValueError):
d.multiline_textbbox((0, 0), "foo\nbar", anchor=anchor)
@pytest.mark.parametrize("bpp", (1, 2, 4, 8))
def test_bitmap_font(layout_engine, bpp):
text = "Bitmap Font"
layout_name = ["basic", "raqm"][layout_engine]
target = f"Tests/images/bitmap_font_{bpp}_{layout_name}.png"
font = ImageFont.truetype(
f"Tests/fonts/DejaVuSans/DejaVuSans-24-{bpp}-stripped.ttf",
24,
layout_engine=layout_engine,
)
im = Image.new("RGB", (160, 35), "white")
draw = ImageDraw.Draw(im)
draw.text((2, 2), text, "black", font)
assert_image_equal_tofile(im, target)
def test_bitmap_font_stroke(layout_engine):
text = "Bitmap Font"
layout_name = ["basic", "raqm"][layout_engine]
target = f"Tests/images/bitmap_font_stroke_{layout_name}.png"
font = ImageFont.truetype(
"Tests/fonts/DejaVuSans/DejaVuSans-24-8-stripped.ttf",
24,
layout_engine=layout_engine,
)
im = Image.new("RGB", (160, 35), "white")
draw = ImageDraw.Draw(im)
draw.text((2, 2), text, "black", font, stroke_width=2, stroke_fill="red")
assert_image_similar_tofile(im, target, 0.03)
def test_standard_embedded_color(layout_engine):
txt = "Hello World!"
ttf = ImageFont.truetype(FONT_PATH, 40, layout_engine=layout_engine)
ttf.getbbox(txt)
im = Image.new("RGB", (300, 64), "white")
d = ImageDraw.Draw(im)
d.text((10, 10), txt, font=ttf, fill="#fa6", embedded_color=True)
assert_image_similar_tofile(im, "Tests/images/standard_embedded.png", 3.1)
@pytest.mark.parametrize("fontmode", ("1", "L", "RGBA"))
def test_float_coord(layout_engine, fontmode):
txt = "Hello World!"
ttf = ImageFont.truetype(FONT_PATH, 40, layout_engine=layout_engine)
im = Image.new("RGB", (300, 64), "white")
d = ImageDraw.Draw(im)
if fontmode == "1":
d.fontmode = "1"
embedded_color = fontmode == "RGBA"
d.text((9.5, 9.5), txt, font=ttf, fill="#fa6", embedded_color=embedded_color)
try:
assert_image_similar_tofile(im, "Tests/images/text_float_coord.png", 3.9)
except AssertionError:
if fontmode == "1" and layout_engine == ImageFont.Layout.BASIC:
assert_image_similar_tofile(
im, "Tests/images/text_float_coord_1_alt.png", 1
)
else:
raise
def test_cbdt(layout_engine):
try:
font = ImageFont.truetype(
"Tests/fonts/NotoColorEmoji.ttf", size=109, layout_engine=layout_engine
)
im = Image.new("RGB", (150, 150), "white")
d = ImageDraw.Draw(im)
d.text((10, 10), "\U0001f469", font=font, embedded_color=True)
assert_image_similar_tofile(im, "Tests/images/cbdt_notocoloremoji.png", 6.2)
except OSError as e: # pragma: no cover
assert str(e) in ("unimplemented feature", "unknown file format")
pytest.skip("freetype compiled without libpng or CBDT support")
def test_cbdt_mask(layout_engine):
try:
font = ImageFont.truetype(
"Tests/fonts/NotoColorEmoji.ttf", size=109, layout_engine=layout_engine
)
im = Image.new("RGB", (150, 150), "white")
d = ImageDraw.Draw(im)
d.text((10, 10), "\U0001f469", "black", font=font)
assert_image_similar_tofile(
im, "Tests/images/cbdt_notocoloremoji_mask.png", 6.2
)
except OSError as e: # pragma: no cover
assert str(e) in ("unimplemented feature", "unknown file format")
pytest.skip("freetype compiled without libpng or CBDT support")
def test_sbix(layout_engine):
try:
font = ImageFont.truetype(
"Tests/fonts/chromacheck-sbix.woff", size=300, layout_engine=layout_engine
)
im = Image.new("RGB", (400, 400), "white")
d = ImageDraw.Draw(im)
d.text((50, 50), "\uE901", font=font, embedded_color=True)
assert_image_similar_tofile(im, "Tests/images/chromacheck-sbix.png", 1)
except OSError as e: # pragma: no cover
assert str(e) in ("unimplemented feature", "unknown file format")
pytest.skip("freetype compiled without libpng or SBIX support")
def test_sbix_mask(layout_engine):
try:
font = ImageFont.truetype(
"Tests/fonts/chromacheck-sbix.woff", size=300, layout_engine=layout_engine
)
im = Image.new("RGB", (400, 400), "white")
d = ImageDraw.Draw(im)
d.text((50, 50), "\uE901", (100, 0, 0), font=font)
assert_image_similar_tofile(im, "Tests/images/chromacheck-sbix_mask.png", 1)
except OSError as e: # pragma: no cover
assert str(e) in ("unimplemented feature", "unknown file format")
pytest.skip("freetype compiled without libpng or SBIX support")
@skip_unless_feature_version("freetype2", "2.10.0")
def test_colr(layout_engine):
font = ImageFont.truetype(
"Tests/fonts/BungeeColor-Regular_colr_Windows.ttf",
size=64,
layout_engine=layout_engine,
)
im = Image.new("RGB", (300, 75), "white")
d = ImageDraw.Draw(im)
d.text((15, 5), "Bungee", font=font, embedded_color=True)
assert_image_similar_tofile(im, "Tests/images/colr_bungee.png", 21)
@skip_unless_feature_version("freetype2", "2.10.0")
def test_colr_mask(layout_engine):
font = ImageFont.truetype(
"Tests/fonts/BungeeColor-Regular_colr_Windows.ttf",
size=64,
layout_engine=layout_engine,
)
im = Image.new("RGB", (300, 75), "white")
d = ImageDraw.Draw(im)
d.text((15, 5), "Bungee", "black", font=font)
assert_image_similar_tofile(im, "Tests/images/colr_bungee_mask.png", 22)
def test_woff2(layout_engine):
try:
font = ImageFont.truetype(
"Tests/fonts/OpenSans.woff2",
size=64,
layout_engine=layout_engine,
)
except OSError as e:
assert str(e) in ("unimplemented feature", "unknown file format")
pytest.skip("FreeType compiled without brotli or WOFF2 support")
im = Image.new("RGB", (350, 100), "white")
d = ImageDraw.Draw(im)
d.text((15, 5), "OpenSans", "black", font=font)
assert_image_similar_tofile(im, "Tests/images/test_woff2.png", 5)
def test_render_mono_size():
# issue 4177
im = Image.new("P", (100, 30), "white")
draw = ImageDraw.Draw(im)
ttf = ImageFont.truetype(
"Tests/fonts/DejaVuSans/DejaVuSans.ttf",
18,
layout_engine=ImageFont.Layout.BASIC,
)
draw.text((10, 10), "r" * 10, "black", ttf)
assert_image_equal_tofile(im, "Tests/images/text_mono.gif")
@pytest.mark.parametrize(
"test_file",
[
"Tests/fonts/oom-e8e927ba6c0d38274a37c1567560eb33baf74627.ttf",
"Tests/fonts/oom-4da0210eb7081b0bf15bf16cc4c52ce02c1e1bbc.ttf",
],
)
def test_oom(test_file):
with open(test_file, "rb") as f:
font = ImageFont.truetype(BytesIO(f.read()))
with pytest.raises(Image.DecompressionBombError):
font.getmask("Test Text")
def test_raqm_missing_warning(monkeypatch):
monkeypatch.setattr(ImageFont.core, "HAVE_RAQM", False)
with pytest.warns(UserWarning) as record:
font = ImageFont.truetype(
FONT_PATH, FONT_SIZE, layout_engine=ImageFont.Layout.RAQM
)
assert font.layout_engine == ImageFont.Layout.BASIC
assert str(record[-1].message) == (
"Raqm layout was requested, but Raqm is not available. "
"Falling back to basic layout."
)
|
GHSA-8ghj-p4vj-mr35
|
src/PIL/ImageFont.py
|
@@ -41,6 +41,9 @@ class Layout(IntEnum):
RAQM = 1
+MAX_STRING_LENGTH = 1000000
+
+
try:
from . import _imagingft as core
except ImportError as ex:
@@ -49,6 +52,12 @@ class Layout(IntEnum):
core = DeferredError(ex)
+def _string_length_check(text):
+ if MAX_STRING_LENGTH is not None and len(text) > MAX_STRING_LENGTH:
+ msg = "too many characters in string"
+ raise ValueError(msg)
+
+
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
@@ -152,6 +161,7 @@ def getbbox(self, text, *args, **kwargs):
:return: ``(left, top, right, bottom)`` bounding box
"""
+ _string_length_check(text)
width, height = self.font.getsize(text)
return 0, 0, width, height
@@ -162,6 +172,7 @@ def getlength(self, text, *args, **kwargs):
.. versionadded:: 9.2.0
"""
+ _string_length_check(text)
width, height = self.font.getsize(text)
return width
@@ -309,6 +320,7 @@ def getlength(self, text, mode="", direction=None, features=None, language=None)
:return: Width for horizontal, height for vertical text.
"""
+ _string_length_check(text)
return self.font.getlength(text, mode, direction, features, language) / 64
def getbbox(
@@ -368,6 +380,7 @@ def getbbox(
:return: ``(left, top, right, bottom)`` bounding box
"""
+ _string_length_check(text)
size, offset = self.font.getsize(
text, mode, direction, features, language, anchor
)
@@ -546,6 +559,7 @@ def getmask2(
:py:mod:`PIL.Image.core` interface module, and the text offset, the
gap between the starting coordinate and the first marking
"""
+ _string_length_check(text)
if start is None:
start = (0, 0)
im, size, offset = self.font.render(
@@ -684,6 +698,7 @@ def getlength(self, text, *args, **kwargs):
if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270):
msg = "text length is undefined for text rotated by 90 or 270 degrees"
raise ValueError(msg)
+ _string_length_check(text)
return self.font.getlength(text, *args, **kwargs)
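The hunks above do not change the renderer itself; they add a single module-level limit, MAX_STRING_LENGTH, and call _string_length_check() from every text-measuring and rendering entry point. A minimal sketch of the intended effect, assuming a Pillow build that includes this patch (the 2,000,000-character string is only an illustration of "longer than the 1,000,000-character limit defined above"):

from PIL import ImageFont

font = ImageFont.load_default()
# With the patch applied, _string_length_check() runs before the text ever
# reaches the layout engine, so an attacker-supplied megastring fails fast.
try:
    font.getlength("A" * 2_000_000)  # exceeds MAX_STRING_LENGTH (1_000_000)
except ValueError as exc:
    print(exc)  # "too many characters in string"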
|
#
# The Python Imaging Library.
# $Id$
#
# PIL raster font management
#
# History:
# 1996-08-07 fl created (experimental)
# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3
# 1999-02-06 fl rewrote most font management stuff in C
# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)
# 2001-02-17 fl added freetype support
# 2001-05-09 fl added TransposedFont wrapper class
# 2002-03-04 fl make sure we have a "L" or "1" font
# 2002-12-04 fl skip non-directory entries in the system path
# 2003-04-29 fl add embedded default font
# 2003-09-27 fl added support for truetype charmap encodings
#
# Todo:
# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import base64
import os
import sys
import warnings
from enum import IntEnum
from io import BytesIO
from . import Image
from ._util import is_directory, is_path
class Layout(IntEnum):
BASIC = 0
RAQM = 1
try:
from . import _imagingft as core
except ImportError as ex:
from ._util import DeferredError
core = DeferredError(ex)
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
class ImageFont:
"""PIL font wrapper"""
def _load_pilfont(self, filename):
with open(filename, "rb") as fp:
image = None
for ext in (".png", ".gif", ".pbm"):
if image:
image.close()
try:
fullname = os.path.splitext(filename)[0] + ext
image = Image.open(fullname)
except Exception:
pass
else:
if image and image.mode in ("1", "L"):
break
else:
if image:
image.close()
msg = "cannot find glyph data file"
raise OSError(msg)
self.file = fullname
self._load_pilfont_data(fp, image)
image.close()
def _load_pilfont_data(self, file, image):
# read PILfont header
if file.readline() != b"PILfont\n":
msg = "Not a PILfont file"
raise SyntaxError(msg)
file.readline().split(b";")
self.info = [] # FIXME: should be a dictionary
while True:
s = file.readline()
if not s or s == b"DATA\n":
break
self.info.append(s)
# read PILfont metrics
data = file.read(256 * 20)
# check image
if image.mode not in ("1", "L"):
msg = "invalid font image mode"
raise TypeError(msg)
image.load()
self.font = Image.core.font(image.im, data)
def getmask(self, text, mode="", *args, **kwargs):
"""
Create a bitmap for the text.
If the font uses antialiasing, the bitmap should have mode ``L`` and use a
maximum value of 255. Otherwise, it should have mode ``1``.
:param text: Text to render.
:param mode: Used by some graphics drivers to indicate what mode the
driver prefers; if empty, the renderer may return either
mode. Note that the mode is always a string, to simplify
C-level implementations.
.. versionadded:: 1.1.5
:return: An internal PIL storage memory instance as defined by the
:py:mod:`PIL.Image.core` interface module.
"""
return self.font.getmask(text, mode)
def getbbox(self, text, *args, **kwargs):
"""
Returns bounding box (in pixels) of given text.
.. versionadded:: 9.2.0
:param text: Text to render.
:param mode: Used by some graphics drivers to indicate what mode the
driver prefers; if empty, the renderer may return either
mode. Note that the mode is always a string, to simplify
C-level implementations.
:return: ``(left, top, right, bottom)`` bounding box
"""
width, height = self.font.getsize(text)
return 0, 0, width, height
def getlength(self, text, *args, **kwargs):
"""
Returns length (in pixels) of given text.
This is the amount by which following text should be offset.
.. versionadded:: 9.2.0
"""
width, height = self.font.getsize(text)
return width
##
# Wrapper for FreeType fonts. Application code should use the
# <b>truetype</b> factory function to create font objects.
class FreeTypeFont:
"""FreeType font wrapper (requires _imagingft service)"""
def __init__(self, font=None, size=10, index=0, encoding="", layout_engine=None):
# FIXME: use service provider instead
self.path = font
self.size = size
self.index = index
self.encoding = encoding
if layout_engine not in (Layout.BASIC, Layout.RAQM):
layout_engine = Layout.BASIC
if core.HAVE_RAQM:
layout_engine = Layout.RAQM
elif layout_engine == Layout.RAQM and not core.HAVE_RAQM:
warnings.warn(
"Raqm layout was requested, but Raqm is not available. "
"Falling back to basic layout."
)
layout_engine = Layout.BASIC
self.layout_engine = layout_engine
def load_from_bytes(f):
self.font_bytes = f.read()
self.font = core.getfont(
"", size, index, encoding, self.font_bytes, layout_engine
)
if is_path(font):
if sys.platform == "win32":
font_bytes_path = font if isinstance(font, bytes) else font.encode()
try:
font_bytes_path.decode("ascii")
except UnicodeDecodeError:
# FreeType cannot load fonts with non-ASCII characters on Windows
# So load it into memory first
with open(font, "rb") as f:
load_from_bytes(f)
return
self.font = core.getfont(
font, size, index, encoding, layout_engine=layout_engine
)
else:
load_from_bytes(font)
def __getstate__(self):
return [self.path, self.size, self.index, self.encoding, self.layout_engine]
def __setstate__(self, state):
path, size, index, encoding, layout_engine = state
self.__init__(path, size, index, encoding, layout_engine)
def getname(self):
"""
:return: A tuple of the font family (e.g. Helvetica) and the font style
(e.g. Bold)
"""
return self.font.family, self.font.style
def getmetrics(self):
"""
:return: A tuple of the font ascent (the distance from the baseline to
the highest outline point) and descent (the distance from the
baseline to the lowest outline point, a negative value)
"""
return self.font.ascent, self.font.descent
def getlength(self, text, mode="", direction=None, features=None, language=None):
"""
Returns length (in pixels with 1/64 precision) of given text when rendered
in font with provided direction, features, and language.
This is the amount by which following text should be offset.
Text bounding box may extend past the length in some fonts,
e.g. when using italics or accents.
The result is returned as a float; it is a whole number if using basic layout.
Note that the sum of two lengths may not equal the length of a concatenated
string due to kerning. If you need to adjust for kerning, include the following
character and subtract its length.
For example, instead of ::
hello = font.getlength("Hello")
world = font.getlength("World")
hello_world = hello + world # not adjusted for kerning
assert hello_world == font.getlength("HelloWorld") # may fail
use ::
hello = font.getlength("HelloW") - font.getlength("W") # adjusted for kerning
world = font.getlength("World")
hello_world = hello + world # adjusted for kerning
assert hello_world == font.getlength("HelloWorld") # True
or disable kerning with (requires libraqm) ::
hello = draw.textlength("Hello", font, features=["-kern"])
world = draw.textlength("World", font, features=["-kern"])
hello_world = hello + world # kerning is disabled, no need to adjust
assert hello_world == draw.textlength("HelloWorld", font, features=["-kern"])
.. versionadded:: 8.0.0
:param text: Text to measure.
:param mode: Used by some graphics drivers to indicate what mode the
driver prefers; if empty, the renderer may return either
mode. Note that the mode is always a string, to simplify
C-level implementations.
:param direction: Direction of the text. It can be 'rtl' (right to
left), 'ltr' (left to right) or 'ttb' (top to bottom).
Requires libraqm.
:param features: A list of OpenType font features to be used during text
layout. This is usually used to turn on optional
font features that are not enabled by default,
for example 'dlig' or 'ss01', but can also be
used to turn off default font features, for
example '-liga' to disable ligatures or '-kern'
to disable kerning. To get all supported
features, see
https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist
Requires libraqm.
:param language: Language of the text. Different languages may use
different glyph shapes or ligatures. This parameter tells
the font which language the text is in, and to apply the
correct substitutions as appropriate, if available.
It should be a `BCP 47 language code
<https://www.w3.org/International/articles/language-tags/>`_
Requires libraqm.
:return: Width for horizontal, height for vertical text.
"""
return self.font.getlength(text, mode, direction, features, language) / 64
def getbbox(
self,
text,
mode="",
direction=None,
features=None,
language=None,
stroke_width=0,
anchor=None,
):
"""
Returns bounding box (in pixels) of given text relative to given anchor
when rendered in font with provided direction, features, and language.
Use :py:meth:`getlength()` to get the offset of following text with
1/64 pixel precision. The bounding box includes extra margins for
some fonts, e.g. italics or accents.
.. versionadded:: 8.0.0
:param text: Text to render.
:param mode: Used by some graphics drivers to indicate what mode the
driver prefers; if empty, the renderer may return either
mode. Note that the mode is always a string, to simplify
C-level implementations.
:param direction: Direction of the text. It can be 'rtl' (right to
left), 'ltr' (left to right) or 'ttb' (top to bottom).
Requires libraqm.
:param features: A list of OpenType font features to be used during text
layout. This is usually used to turn on optional
font features that are not enabled by default,
for example 'dlig' or 'ss01', but can also be
used to turn off default font features, for
example '-liga' to disable ligatures or '-kern'
to disable kerning. To get all supported
features, see
https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist
Requires libraqm.
:param language: Language of the text. Different languages may use
different glyph shapes or ligatures. This parameter tells
the font which language the text is in, and to apply the
correct substitutions as appropriate, if available.
It should be a `BCP 47 language code
<https://www.w3.org/International/articles/language-tags/>`_
Requires libraqm.
:param stroke_width: The width of the text stroke.
:param anchor: The text anchor alignment. Determines the relative location of
the anchor to the text. The default alignment is top left.
See :ref:`text-anchors` for valid values.
:return: ``(left, top, right, bottom)`` bounding box
"""
size, offset = self.font.getsize(
text, mode, direction, features, language, anchor
)
left, top = offset[0] - stroke_width, offset[1] - stroke_width
width, height = size[0] + 2 * stroke_width, size[1] + 2 * stroke_width
return left, top, left + width, top + height
def getmask(
self,
text,
mode="",
direction=None,
features=None,
language=None,
stroke_width=0,
anchor=None,
ink=0,
start=None,
):
"""
Create a bitmap for the text.
If the font uses antialiasing, the bitmap should have mode ``L`` and use a
maximum value of 255. If the font has embedded color data, the bitmap
should have mode ``RGBA``. Otherwise, it should have mode ``1``.
:param text: Text to render.
:param mode: Used by some graphics drivers to indicate what mode the
driver prefers; if empty, the renderer may return either
mode. Note that the mode is always a string, to simplify
C-level implementations.
.. versionadded:: 1.1.5
:param direction: Direction of the text. It can be 'rtl' (right to
left), 'ltr' (left to right) or 'ttb' (top to bottom).
Requires libraqm.
.. versionadded:: 4.2.0
:param features: A list of OpenType font features to be used during text
layout. This is usually used to turn on optional
font features that are not enabled by default,
for example 'dlig' or 'ss01', but can also be
used to turn off default font features, for
example '-liga' to disable ligatures or '-kern'
to disable kerning. To get all supported
features, see
https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist
Requires libraqm.
.. versionadded:: 4.2.0
:param language: Language of the text. Different languages may use
different glyph shapes or ligatures. This parameter tells
the font which language the text is in, and to apply the
correct substitutions as appropriate, if available.
It should be a `BCP 47 language code
<https://www.w3.org/International/articles/language-tags/>`_
Requires libraqm.
.. versionadded:: 6.0.0
:param stroke_width: The width of the text stroke.
.. versionadded:: 6.2.0
:param anchor: The text anchor alignment. Determines the relative location of
the anchor to the text. The default alignment is top left.
See :ref:`text-anchors` for valid values.
.. versionadded:: 8.0.0
:param ink: Foreground ink for rendering in RGBA mode.
.. versionadded:: 8.0.0
:param start: Tuple of horizontal and vertical offset, as text may render
differently when starting at fractional coordinates.
.. versionadded:: 9.4.0
:return: An internal PIL storage memory instance as defined by the
:py:mod:`PIL.Image.core` interface module.
"""
return self.getmask2(
text,
mode,
direction=direction,
features=features,
language=language,
stroke_width=stroke_width,
anchor=anchor,
ink=ink,
start=start,
)[0]
def getmask2(
self,
text,
mode="",
direction=None,
features=None,
language=None,
stroke_width=0,
anchor=None,
ink=0,
start=None,
*args,
**kwargs,
):
"""
Create a bitmap for the text.
If the font uses antialiasing, the bitmap should have mode ``L`` and use a
maximum value of 255. If the font has embedded color data, the bitmap
should have mode ``RGBA``. Otherwise, it should have mode ``1``.
:param text: Text to render.
:param mode: Used by some graphics drivers to indicate what mode the
driver prefers; if empty, the renderer may return either
mode. Note that the mode is always a string, to simplify
C-level implementations.
.. versionadded:: 1.1.5
:param direction: Direction of the text. It can be 'rtl' (right to
left), 'ltr' (left to right) or 'ttb' (top to bottom).
Requires libraqm.
.. versionadded:: 4.2.0
:param features: A list of OpenType font features to be used during text
layout. This is usually used to turn on optional
font features that are not enabled by default,
for example 'dlig' or 'ss01', but can also be
used to turn off default font features, for
example '-liga' to disable ligatures or '-kern'
to disable kerning. To get all supported
features, see
https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist
Requires libraqm.
.. versionadded:: 4.2.0
:param language: Language of the text. Different languages may use
different glyph shapes or ligatures. This parameter tells
the font which language the text is in, and to apply the
correct substitutions as appropriate, if available.
It should be a `BCP 47 language code
<https://www.w3.org/International/articles/language-tags/>`_
Requires libraqm.
.. versionadded:: 6.0.0
:param stroke_width: The width of the text stroke.
.. versionadded:: 6.2.0
:param anchor: The text anchor alignment. Determines the relative location of
the anchor to the text. The default alignment is top left.
See :ref:`text-anchors` for valid values.
.. versionadded:: 8.0.0
:param ink: Foreground ink for rendering in RGBA mode.
.. versionadded:: 8.0.0
:param start: Tuple of horizontal and vertical offset, as text may render
differently when starting at fractional coordinates.
.. versionadded:: 9.4.0
:return: A tuple of an internal PIL storage memory instance as defined by the
:py:mod:`PIL.Image.core` interface module, and the text offset, the
gap between the starting coordinate and the first marking
"""
if start is None:
start = (0, 0)
im, size, offset = self.font.render(
text,
Image.core.fill,
mode,
direction,
features,
language,
stroke_width,
anchor,
ink,
start[0],
start[1],
Image.MAX_IMAGE_PIXELS,
)
Image._decompression_bomb_check(size)
return im, offset
def font_variant(
self, font=None, size=None, index=None, encoding=None, layout_engine=None
):
"""
Create a copy of this FreeTypeFont object,
using any specified arguments to override the settings.
Parameters are identical to the parameters used to initialize this
object.
:return: A FreeTypeFont object.
"""
if font is None:
try:
font = BytesIO(self.font_bytes)
except AttributeError:
font = self.path
return FreeTypeFont(
font=font,
size=self.size if size is None else size,
index=self.index if index is None else index,
encoding=self.encoding if encoding is None else encoding,
layout_engine=layout_engine or self.layout_engine,
)
def get_variation_names(self):
"""
:returns: A list of the named styles in a variation font.
:exception OSError: If the font is not a variation font.
"""
try:
names = self.font.getvarnames()
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
return [name.replace(b"\x00", b"") for name in names]
def set_variation_by_name(self, name):
"""
:param name: The name of the style.
:exception OSError: If the font is not a variation font.
"""
names = self.get_variation_names()
if not isinstance(name, bytes):
name = name.encode()
index = names.index(name) + 1
if index == getattr(self, "_last_variation_index", None):
# When the same name is set twice in a row,
# there is an 'unknown freetype error'
# https://savannah.nongnu.org/bugs/?56186
return
self._last_variation_index = index
self.font.setvarname(index)
def get_variation_axes(self):
"""
:returns: A list of the axes in a variation font.
:exception OSError: If the font is not a variation font.
"""
try:
axes = self.font.getvaraxes()
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
for axis in axes:
axis["name"] = axis["name"].replace(b"\x00", b"")
return axes
def set_variation_by_axes(self, axes):
"""
:param axes: A list of values for each axis.
:exception OSError: If the font is not a variation font.
"""
try:
self.font.setvaraxes(axes)
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
class TransposedFont:
"""Wrapper for writing rotated or mirrored text"""
def __init__(self, font, orientation=None):
"""
Wrapper that creates a transposed font from any existing font
object.
:param font: A font object.
:param orientation: An optional orientation. If given, this should
be one of Image.Transpose.FLIP_LEFT_RIGHT, Image.Transpose.FLIP_TOP_BOTTOM,
Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_180, or
Image.Transpose.ROTATE_270.
"""
self.font = font
self.orientation = orientation # any 'transpose' argument, or None
def getmask(self, text, mode="", *args, **kwargs):
im = self.font.getmask(text, mode, *args, **kwargs)
if self.orientation is not None:
return im.transpose(self.orientation)
return im
def getbbox(self, text, *args, **kwargs):
# TransposedFont doesn't support getmask2, move top-left point to (0, 0)
# this has no effect on ImageFont and simulates anchor="lt" for FreeTypeFont
left, top, right, bottom = self.font.getbbox(text, *args, **kwargs)
width = right - left
height = bottom - top
if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270):
return 0, 0, height, width
return 0, 0, width, height
def getlength(self, text, *args, **kwargs):
if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270):
msg = "text length is undefined for text rotated by 90 or 270 degrees"
raise ValueError(msg)
return self.font.getlength(text, *args, **kwargs)
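# Usage sketch (illustrative; not part of the original module). TransposedFont
# only wraps getmask/getbbox/getlength, so given an ImageDraw.Draw instance
# `draw` it is passed around exactly like any other font object:
#
#     font = ImageFont.truetype("FreeMono.ttf", 24)            # hypothetical path
#     vertical = ImageFont.TransposedFont(font, Image.Transpose.ROTATE_90)
#     draw.text((10, 10), "sideways", font=vertical)
#
# Note that getlength() above deliberately raises ValueError for ROTATE_90 and
# ROTATE_270, so measure rotated runs with the untransposed font instead.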
def load(filename):
"""
Load a font file. This function loads a font object from the given
bitmap font file, and returns the corresponding font object.
:param filename: Name of font file.
:return: A font object.
:exception OSError: If the file could not be read.
"""
f = ImageFont()
f._load_pilfont(filename)
return f
def truetype(font=None, size=10, index=0, encoding="", layout_engine=None):
"""
Load a TrueType or OpenType font from a file or file-like object,
and create a font object.
This function loads a font object from the given file or file-like
object, and creates a font object for a font of the given size.
Pillow uses FreeType to open font files. On Windows, be aware that FreeType
will keep the file open as long as the FreeTypeFont object exists. Windows
limits the number of files that can be open in C at once to 512, so if many
fonts are opened simultaneously and that limit is approached, an
``OSError`` may be thrown, reporting that FreeType "cannot open resource".
A workaround would be to copy the file(s) into memory, and open that instead.
This function requires the _imagingft service.
:param font: A filename or file-like object containing a TrueType font.
If the file is not found in this filename, the loader may also
search in other directories, such as the :file:`fonts/`
directory on Windows or :file:`/Library/Fonts/`,
:file:`/System/Library/Fonts/` and :file:`~/Library/Fonts/` on
macOS.
:param size: The requested size, in pixels.
:param index: Which font face to load (default is first available face).
:param encoding: Which font encoding to use (default is Unicode). Possible
encodings include (see the FreeType documentation for more
information):
* "unic" (Unicode)
* "symb" (Microsoft Symbol)
* "ADOB" (Adobe Standard)
* "ADBE" (Adobe Expert)
* "ADBC" (Adobe Custom)
* "armn" (Apple Roman)
* "sjis" (Shift JIS)
* "gb " (PRC)
* "big5"
* "wans" (Extended Wansung)
* "joha" (Johab)
* "lat1" (Latin-1)
This specifies the character set to use. It does not alter the
encoding of any text provided in subsequent operations.
:param layout_engine: Which layout engine to use, if available:
:data:`.ImageFont.Layout.BASIC` or :data:`.ImageFont.Layout.RAQM`.
If it is available, Raqm layout will be used by default.
Otherwise, basic layout will be used.
Raqm layout is recommended for all non-English text. If Raqm layout
is not required, basic layout will have better performance.
You can check support for Raqm layout using
:py:func:`PIL.features.check_feature` with ``feature="raqm"``.
.. versionadded:: 4.2.0
:return: A font object.
:exception OSError: If the file could not be read.
"""
def freetype(font):
return FreeTypeFont(font, size, index, encoding, layout_engine)
try:
return freetype(font)
except OSError:
if not is_path(font):
raise
ttf_filename = os.path.basename(font)
dirs = []
if sys.platform == "win32":
# check the windows font repository
# NOTE: must use uppercase WINDIR, to work around bugs in
# 1.5.2's os.environ.get()
windir = os.environ.get("WINDIR")
if windir:
dirs.append(os.path.join(windir, "fonts"))
elif sys.platform in ("linux", "linux2"):
lindirs = os.environ.get("XDG_DATA_DIRS")
if not lindirs:
# According to the freedesktop spec, XDG_DATA_DIRS should
# default to /usr/share
lindirs = "/usr/share"
dirs += [os.path.join(lindir, "fonts") for lindir in lindirs.split(":")]
elif sys.platform == "darwin":
dirs += [
"/Library/Fonts",
"/System/Library/Fonts",
os.path.expanduser("~/Library/Fonts"),
]
ext = os.path.splitext(ttf_filename)[1]
first_font_with_a_different_extension = None
for directory in dirs:
for walkroot, walkdir, walkfilenames in os.walk(directory):
for walkfilename in walkfilenames:
if ext and walkfilename == ttf_filename:
return freetype(os.path.join(walkroot, walkfilename))
elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename:
fontpath = os.path.join(walkroot, walkfilename)
if os.path.splitext(fontpath)[1] == ".ttf":
return freetype(fontpath)
if not ext and first_font_with_a_different_extension is None:
first_font_with_a_different_extension = fontpath
if first_font_with_a_different_extension:
return freetype(first_font_with_a_different_extension)
raise
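# Sketch of the in-memory workaround mentioned in the docstring above
# (illustrative; not part of the original module). Passing a file-like object
# instead of a path keeps FreeType from holding the on-disk handle open, which
# sidesteps the Windows open-file limit described there:
#
#     from io import BytesIO
#     from PIL import ImageFont
#     with open("C:/Windows/Fonts/arial.ttf", "rb") as f:   # hypothetical path
#         font = ImageFont.truetype(BytesIO(f.read()), size=24)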
def load_path(filename):
"""
Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a
bitmap font along the Python path.
:param filename: Name of font file.
:return: A font object.
:exception OSError: If the file could not be read.
"""
for directory in sys.path:
if is_directory(directory):
if not isinstance(filename, str):
filename = filename.decode("utf-8")
try:
return load(os.path.join(directory, filename))
except OSError:
pass
msg = "cannot find font file"
raise OSError(msg)
def load_default():
"""Load a "better than nothing" default font.
.. versionadded:: 1.1.4
:return: A font object.
"""
f = ImageFont()
f._load_pilfont_data(
# courB08
BytesIO(
base64.b64decode(
b"""
UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA
BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL
AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA
AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB
ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A
BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB
//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA
AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH
AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA
ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv
AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/
/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5
AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA
AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG
AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA
BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA
AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA
2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF
AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////
+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA
////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA
BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv
AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA
AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA
AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA
BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//
//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA
AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF
AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB
mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn
AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA
AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7
AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA
Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB
//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA
AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ
AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC
DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ
AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/
+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5
AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/
///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG
AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA
BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA
Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC
eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG
AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////
+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA
////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA
BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT
AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A
AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA
Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA
Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//
//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA
AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ
AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA
LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5
AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA
AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5
AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA
AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG
AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA
EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK
AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA
pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG
AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////
+QAGAAIAzgAKANUAEw==
"""
)
),
Image.open(
BytesIO(
base64.b64decode(
b"""
iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u
Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9
M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g
LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F
IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA
Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791
NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx
in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9
SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY
AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt
y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG
ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY
lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H
/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3
AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47
c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/
/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw
pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv
oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR
evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA
AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//
Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR
w7IkEbzhVQAAAABJRU5ErkJggg==
"""
)
)
),
)
return f
|
GHSA-8ghj-p4vj-mr35
|
libs/community/langchain_community/document_loaders/sitemap.py
|
@@ -1,6 +1,16 @@
import itertools
import re
-from typing import Any, Callable, Generator, Iterable, Iterator, List, Optional, Tuple
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generator,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+)
from urllib.parse import urlparse
from langchain_core.documents import Document
@@ -75,6 +85,7 @@ def __init__(
is_local: bool = False,
continue_on_failure: bool = False,
restrict_to_same_domain: bool = True,
+ max_depth: int = 10,
**kwargs: Any,
):
"""Initialize with webpage path and optional filter URLs.
@@ -105,6 +116,7 @@ def __init__(
restrict_to_same_domain: whether to restrict loading to URLs to the same
domain as the sitemap. Attention: This is only applied if the sitemap
is not a local file!
+ max_depth: maximum depth to follow sitemap links. Default: 10
"""
if blocksize is not None and blocksize < 1:
@@ -134,17 +146,23 @@ def __init__(
self.blocknum = blocknum
self.is_local = is_local
self.continue_on_failure = continue_on_failure
+ self.max_depth = max_depth
- def parse_sitemap(self, soup: Any) -> List[dict]:
+ def parse_sitemap(self, soup: Any, *, depth: int = 0) -> List[dict]:
"""Parse sitemap xml and load into a list of dicts.
Args:
soup: BeautifulSoup object.
+ depth: current depth of the sitemap. Default: 0
Returns:
List of dicts.
"""
- els = []
+ if depth >= self.max_depth:
+ return []
+
+ els: List[Dict] = []
+
for url in soup.find_all("url"):
loc = url.find("loc")
if not loc:
@@ -177,9 +195,9 @@ def parse_sitemap(self, soup: Any) -> List[dict]:
loc = sitemap.find("loc")
if not loc:
continue
- soup_child = self.scrape_all([loc.text], "xml")[0]
- els.extend(self.parse_sitemap(soup_child))
+ soup_child = self.scrape_all([loc.text], "xml")[0]
+ els.extend(self.parse_sitemap(soup_child, depth=depth + 1))
return els
def lazy_load(self) -> Iterator[Document]:
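The hunk above caps how far parse_sitemap will follow nested <sitemap> index entries: each recursive call passes depth + 1 and the method returns an empty list once depth reaches max_depth, so a crafted or self-referencing sitemap index can no longer drive unbounded recursion and unbounded fetching. Below is a minimal standard-library sketch of the same depth-capped traversal; parse_sitemap_xml, MAX_DEPTH and the fetch callable are illustrative names invented here, not part of langchain_community.
import xml.etree.ElementTree as ET
from typing import Callable, Dict, List
MAX_DEPTH = 10  # assumed default, mirroring the max_depth added in the patch
def parse_sitemap_xml(
    xml_text: str, fetch: Callable[[str], str], *, depth: int = 0
) -> List[Dict[str, str]]:
    # Collect <url> entries, following nested <sitemap> indexes up to MAX_DEPTH.
    # `fetch` is an assumed callable returning XML text; it stands in for the
    # loader's scrape_all() and is not a library API.
    if depth >= MAX_DEPTH:
        return []  # stop instead of recursing forever on self-referencing indexes
    ns = {"sm": "http://www.sitemaps.org/schemas/sitemap/0.9"}
    root = ET.fromstring(xml_text)
    entries: List[Dict[str, str]] = []
    for url in root.findall("sm:url", ns):
        loc = url.find("sm:loc", ns)
        if loc is not None and loc.text:
            entries.append({"loc": loc.text.strip()})
    for sitemap in root.findall("sm:sitemap", ns):
        loc = sitemap.find("sm:loc", ns)
        if loc is not None and loc.text:
            entries.extend(
                parse_sitemap_xml(fetch(loc.text.strip()), fetch, depth=depth + 1)
            )
    return entries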
|
import itertools
import re
from typing import Any, Callable, Generator, Iterable, Iterator, List, Optional, Tuple
from urllib.parse import urlparse
from langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
def _default_parsing_function(content: Any) -> str:
return str(content.get_text())
def _default_meta_function(meta: dict, _content: Any) -> dict:
return {"source": meta["loc"], **meta}
def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]:
it = iter(iterable)
while item := list(itertools.islice(it, size)):
yield item
def _extract_scheme_and_domain(url: str) -> Tuple[str, str]:
"""Extract the scheme + domain from a given URL.
Args:
url (str): The input URL.
Returns:
return a 2-tuple of scheme and domain
"""
parsed_uri = urlparse(url)
return parsed_uri.scheme, parsed_uri.netloc
class SitemapLoader(WebBaseLoader):
"""Load a sitemap and its URLs.
**Security Note**: This loader can be used to load all URLs specified in a sitemap.
If a malicious actor gets access to the sitemap, they could force
the server to load URLs from other domains by modifying the sitemap.
This could lead to server-side request forgery (SSRF) attacks; e.g.,
with the attacker forcing the server to load URLs from internal
service endpoints that are not publicly accessible. While the attacker
may not immediately gain access to this data, this data could leak
into downstream systems (e.g., data loader is used to load data for indexing).
This loader is a crawler and web crawlers should generally NOT be deployed
with network access to any internal servers.
Control access to who can submit crawling requests and what network access
the crawler has.
By default, the loader will only load URLs from the same domain as the sitemap
if the site map is not a local file. This can be disabled by setting
restrict_to_same_domain to False (not recommended).
If the site map is a local file, no such risk mitigation is applied by default.
Use the filter URLs argument to limit which URLs can be loaded.
See https://python.langchain.com/docs/security
"""
def __init__(
self,
web_path: str,
filter_urls: Optional[List[str]] = None,
parsing_function: Optional[Callable] = None,
blocksize: Optional[int] = None,
blocknum: int = 0,
meta_function: Optional[Callable] = None,
is_local: bool = False,
continue_on_failure: bool = False,
restrict_to_same_domain: bool = True,
**kwargs: Any,
):
"""Initialize with webpage path and optional filter URLs.
Args:
web_path: url of the sitemap. can also be a local path
filter_urls: a list of regexes. If specified, only
URLS that match one of the filter URLs will be loaded.
*WARNING* The filter URLs are interpreted as regular expressions.
Remember to escape special characters if you do not want them to be
interpreted as regular expression syntax. For example, `.` appears
frequently in URLs and should be escaped if you want to match a literal
`.` rather than any character.
restrict_to_same_domain takes precedence over filter_urls when
restrict_to_same_domain is True and the sitemap is not a local file.
parsing_function: Function to parse bs4.Soup output
blocksize: number of sitemap locations per block
blocknum: the number of the block that should be loaded - zero indexed.
Default: 0
meta_function: Function to parse bs4.Soup output for metadata
remember when setting this method to also copy metadata["loc"]
to metadata["source"] if you are using this field
is_local: whether the sitemap is a local file. Default: False
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
restrict_to_same_domain: whether to restrict loading to URLs to the same
domain as the sitemap. Attention: This is only applied if the sitemap
is not a local file!
"""
if blocksize is not None and blocksize < 1:
raise ValueError("Sitemap blocksize should be at least 1")
if blocknum < 0:
raise ValueError("Sitemap blocknum can not be lower then 0")
try:
import lxml # noqa:F401
except ImportError:
raise ImportError(
"lxml package not found, please install it with `pip install lxml`"
)
super().__init__(web_paths=[web_path], **kwargs)
# Define a list of URL patterns (interpreted as regular expressions) that
# will be allowed to be loaded.
# restrict_to_same_domain takes precedence over filter_urls when
# restrict_to_same_domain is True and the sitemap is not a local file.
self.allow_url_patterns = filter_urls
self.restrict_to_same_domain = restrict_to_same_domain
self.parsing_function = parsing_function or _default_parsing_function
self.meta_function = meta_function or _default_meta_function
self.blocksize = blocksize
self.blocknum = blocknum
self.is_local = is_local
self.continue_on_failure = continue_on_failure
def parse_sitemap(self, soup: Any) -> List[dict]:
"""Parse sitemap xml and load into a list of dicts.
Args:
soup: BeautifulSoup object.
Returns:
List of dicts.
"""
els = []
for url in soup.find_all("url"):
loc = url.find("loc")
if not loc:
continue
# Strip leading and trailing whitespace and newlines
loc_text = loc.text.strip()
if self.restrict_to_same_domain and not self.is_local:
if _extract_scheme_and_domain(loc_text) != _extract_scheme_and_domain(
self.web_path
):
continue
if self.allow_url_patterns and not any(
re.match(regexp_pattern, loc_text)
for regexp_pattern in self.allow_url_patterns
):
continue
els.append(
{
tag: prop.text
for tag in ["loc", "lastmod", "changefreq", "priority"]
if (prop := url.find(tag))
}
)
for sitemap in soup.find_all("sitemap"):
loc = sitemap.find("loc")
if not loc:
continue
soup_child = self.scrape_all([loc.text], "xml")[0]
els.extend(self.parse_sitemap(soup_child))
return els
def lazy_load(self) -> Iterator[Document]:
"""Load sitemap."""
if self.is_local:
try:
import bs4
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it"
" with `pip install beautifulsoup4`"
)
fp = open(self.web_path)
soup = bs4.BeautifulSoup(fp, "xml")
else:
soup = self._scrape(self.web_path, parser="xml")
els = self.parse_sitemap(soup)
if self.blocksize is not None:
elblocks = list(_batch_block(els, self.blocksize))
blockcount = len(elblocks)
if blockcount - 1 < self.blocknum:
raise ValueError(
"Selected sitemap does not contain enough blocks for given blocknum"
)
else:
els = elblocks[self.blocknum]
results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el])
for i, result in enumerate(results):
yield Document(
page_content=self.parsing_function(result),
metadata=self.meta_function(els[i], result),
)
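For reference, the restrict_to_same_domain guard above reduces to a scheme-plus-host comparison between each <loc> entry and the sitemap URL. A tiny self-contained illustration of that check; same_origin is a name made up here, not a function of the loader.
from urllib.parse import urlparse
def same_origin(url: str, sitemap_url: str) -> bool:
    # Compare scheme and network location, as _extract_scheme_and_domain does.
    a, b = urlparse(url), urlparse(sitemap_url)
    return (a.scheme, a.netloc) == (b.scheme, b.netloc)
assert same_origin("https://example.com/a", "https://example.com/sitemap.xml")
assert not same_origin("http://internal.local/admin", "https://example.com/sitemap.xml")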
|
PYSEC-2024-118
|
mlflow/store/artifact/ftp_artifact_repo.py
|
@@ -106,10 +106,10 @@ def list_artifacts(self, path=None):
if not self._is_dir(ftp, list_dir):
return []
artifact_files = ftp.nlst(list_dir)
- artifact_files = list(filter(lambda x: x != "." and x != "..", artifact_files))
# Make sure artifact_files is a list of file names because ftp.nlst
# may return absolute paths.
artifact_files = [os.path.basename(f) for f in artifact_files]
+ artifact_files = list(filter(lambda x: x != "." and x != "..", artifact_files))
infos = []
for file_name in artifact_files:
file_path = file_name if path is None else posixpath.join(path, file_name)
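The reordering above matters because ftp.nlst() may return absolute entries such as "/." or "/run_id/.."; filtering out "." and ".." before taking basenames lets those slip through, whereas taking basenames first closes the gap. A small self-contained sketch of the corrected order; sanitize_listing is an illustrative helper, not an MLflow API.
import os
def sanitize_listing(names):
    # Reduce raw NLST output to plain file names, then drop '.' and '..'.
    basenames = [os.path.basename(n) for n in names]
    return [n for n in basenames if n not in (".", "..")]
# Dots hidden inside absolute paths are still removed:
assert sanitize_listing([".", "/.", "/..", "//.."]) == []
assert sanitize_listing(["/run_id/file.txt", "/run_id/model"]) == ["file.txt", "model"]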
|
import ftplib
import os
import posixpath
import urllib.parse
from contextlib import contextmanager
from ftplib import FTP
from urllib.parse import unquote
from mlflow.entities.file_info import FileInfo
from mlflow.exceptions import MlflowException
from mlflow.store.artifact.artifact_repo import ArtifactRepository
from mlflow.utils.file_utils import relative_path_to_artifact_path
class FTPArtifactRepository(ArtifactRepository):
"""Stores artifacts as files in a remote directory, via ftp."""
def __init__(self, artifact_uri):
self.uri = artifact_uri
parsed = urllib.parse.urlparse(artifact_uri)
self.config = {
"host": parsed.hostname,
"port": 21 if parsed.port is None else parsed.port,
"username": parsed.username,
"password": parsed.password,
}
self.path = parsed.path or "/"
if self.config["host"] is None:
self.config["host"] = "localhost"
if self.config["password"] is None:
self.config["password"] = ""
else:
self.config["password"] = unquote(parsed.password)
super().__init__(artifact_uri)
@contextmanager
def get_ftp_client(self):
ftp = FTP()
ftp.connect(self.config["host"], self.config["port"])
ftp.login(self.config["username"], self.config["password"])
yield ftp
ftp.close()
@staticmethod
def _is_dir(ftp, full_file_path):
try:
ftp.cwd(full_file_path)
return True
except ftplib.error_perm:
return False
@staticmethod
def _mkdir(ftp, artifact_dir):
try:
if not FTPArtifactRepository._is_dir(ftp, artifact_dir):
ftp.mkd(artifact_dir)
except ftplib.error_perm:
head, _ = posixpath.split(artifact_dir)
FTPArtifactRepository._mkdir(ftp, head)
FTPArtifactRepository._mkdir(ftp, artifact_dir)
@staticmethod
def _size(ftp, full_file_path):
ftp.voidcmd("TYPE I")
size = ftp.size(full_file_path)
ftp.voidcmd("TYPE A")
return size
def log_artifact(self, local_file, artifact_path=None):
with self.get_ftp_client() as ftp:
artifact_dir = posixpath.join(self.path, artifact_path) if artifact_path else self.path
self._mkdir(ftp, artifact_dir)
with open(local_file, "rb") as f:
ftp.cwd(artifact_dir)
ftp.storbinary("STOR " + os.path.basename(local_file), f)
def log_artifacts(self, local_dir, artifact_path=None):
dest_path = posixpath.join(self.path, artifact_path) if artifact_path else self.path
local_dir = os.path.abspath(local_dir)
for root, _, filenames in os.walk(local_dir):
upload_path = dest_path
if root != local_dir:
rel_path = os.path.relpath(root, local_dir)
rel_upload_path = relative_path_to_artifact_path(rel_path)
upload_path = posixpath.join(dest_path, rel_upload_path)
if not filenames:
with self.get_ftp_client() as ftp:
self._mkdir(ftp, upload_path)
for f in filenames:
if os.path.isfile(os.path.join(root, f)):
self.log_artifact(os.path.join(root, f), upload_path)
def _is_directory(self, artifact_path):
artifact_dir = self.path
list_dir = posixpath.join(artifact_dir, artifact_path) if artifact_path else artifact_dir
with self.get_ftp_client() as ftp:
return self._is_dir(ftp, list_dir)
def list_artifacts(self, path=None):
with self.get_ftp_client() as ftp:
artifact_dir = self.path
list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir
if not self._is_dir(ftp, list_dir):
return []
artifact_files = ftp.nlst(list_dir)
artifact_files = list(filter(lambda x: x != "." and x != "..", artifact_files))
# Make sure artifact_files is a list of file names because ftp.nlst
# may return absolute paths.
artifact_files = [os.path.basename(f) for f in artifact_files]
infos = []
for file_name in artifact_files:
file_path = file_name if path is None else posixpath.join(path, file_name)
full_file_path = posixpath.join(list_dir, file_name)
if self._is_dir(ftp, full_file_path):
infos.append(FileInfo(file_path, True, None))
else:
size = self._size(ftp, full_file_path)
infos.append(FileInfo(file_path, False, size))
return infos
def _download_file(self, remote_file_path, local_path):
remote_full_path = (
posixpath.join(self.path, remote_file_path) if remote_file_path else self.path
)
with self.get_ftp_client() as ftp:
with open(local_path, "wb") as f:
ftp.retrbinary("RETR " + remote_full_path, f.write)
def delete_artifacts(self, artifact_path=None):
raise MlflowException("Not implemented yet")
|
GHSA-hh8p-p8mp-gqhm
|
tests/store/artifact/test_ftp_artifact_repo.py
|
@@ -67,6 +67,18 @@ def test_list_artifacts(ftp_mock):
assert artifacts[1].file_size is None
+def test_list_artifacts_malicious_path(ftp_mock):
+ artifact_root_path = "/experiment_id/run_id/"
+ repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path)
+ repo.get_ftp_client = MagicMock()
+ call_mock = MagicMock(return_value=ftp_mock)
+ repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
+ ftp_mock.nlst = MagicMock(return_value=[".", "/.", "/..", "//.."])
+
+ artifacts = repo.list_artifacts(path=None)
+ assert artifacts == []
+
+
def test_list_artifacts_when_ftp_nlst_returns_absolute_paths(ftp_mock):
artifact_root_path = "/experiment_id/run_id/"
repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path)
|
# pylint: disable=redefined-outer-name
import ftplib
import posixpath
from ftplib import FTP
from unittest.mock import MagicMock
import pytest
from mlflow.store.artifact.artifact_repository_registry import get_artifact_repository
from mlflow.store.artifact.ftp_artifact_repo import FTPArtifactRepository
@pytest.fixture
def ftp_mock():
return MagicMock(autospec=FTP)
def test_artifact_uri_factory():
repo = get_artifact_repository("ftp://user:pass@test_ftp:123/some/path")
assert isinstance(repo, FTPArtifactRepository)
def test_list_artifacts_empty(ftp_mock):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
ftp_mock.nlst = MagicMock(return_value=[])
assert repo.list_artifacts() == []
ftp_mock.nlst.assert_called_once_with("/some/path")
def test_list_artifacts(ftp_mock):
artifact_root_path = "/experiment_id/run_id/"
repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- file
# |- model
# |- model.pb
file_path = "file"
file_size = 678
dir_path = "model"
ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
ftp_mock.nlst = MagicMock(return_value=[file_path, dir_path])
ftp_mock.size = MagicMock(return_value=file_size)
artifacts = repo.list_artifacts(path=None)
ftp_mock.nlst.assert_called_once_with(artifact_root_path)
ftp_mock.size.assert_called_once_with(artifact_root_path + file_path)
assert len(artifacts) == 2
assert artifacts[0].path == file_path
assert artifacts[0].is_dir is False
assert artifacts[0].file_size == file_size
assert artifacts[1].path == dir_path
assert artifacts[1].is_dir is True
assert artifacts[1].file_size is None
def test_list_artifacts_when_ftp_nlst_returns_absolute_paths(ftp_mock):
artifact_root_path = "/experiment_id/run_id/"
repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- file
# |- model
# |- model.pb
file_path = "file"
dir_path = "model"
file_size = 678
ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
ftp_mock.nlst = MagicMock(
return_value=[
posixpath.join(artifact_root_path, file_path),
posixpath.join(artifact_root_path, dir_path),
]
)
ftp_mock.size = MagicMock(return_value=file_size)
artifacts = repo.list_artifacts(path=None)
ftp_mock.nlst.assert_called_once_with(artifact_root_path)
ftp_mock.size.assert_called_once_with(artifact_root_path + file_path)
assert len(artifacts) == 2
assert artifacts[0].path == file_path
assert artifacts[0].is_dir is False
assert artifacts[0].file_size == file_size
assert artifacts[1].path == dir_path
assert artifacts[1].is_dir is True
assert artifacts[1].file_size is None
def test_list_artifacts_with_subdir(ftp_mock):
artifact_root_path = "/experiment_id/run_id/"
repo = FTPArtifactRepository("sftp://test_sftp" + artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- model
# |- model.pb
# |- variables
dir_name = "model"
# list artifacts at sub directory level
file_path = "model.pb"
file_size = 345
subdir_name = "variables"
ftp_mock.nlst = MagicMock(return_value=[file_path, subdir_name])
ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
ftp_mock.size = MagicMock(return_value=file_size)
artifacts = repo.list_artifacts(path=dir_name)
ftp_mock.nlst.assert_called_once_with(artifact_root_path + dir_name)
ftp_mock.size.assert_called_once_with(artifact_root_path + dir_name + "/" + file_path)
assert len(artifacts) == 2
assert artifacts[0].path == dir_name + "/" + file_path
assert artifacts[0].is_dir is False
assert artifacts[0].file_size == file_size
assert artifacts[1].path == dir_name + "/" + subdir_name
assert artifacts[1].is_dir is True
assert artifacts[1].file_size is None
def test_log_artifact(ftp_mock, tmp_path):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
d = tmp_path.joinpath("data")
d.mkdir()
f = d.joinpath("test.txt")
f.write_text("hello world!")
fpath = d.joinpath("test.txt")
fpath = str(fpath)
ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None])
repo.log_artifact(fpath)
ftp_mock.mkd.assert_called_once_with("/some/path")
ftp_mock.cwd.assert_called_with("/some/path")
ftp_mock.storbinary.assert_called_once()
assert ftp_mock.storbinary.call_args_list[0][0][0] == "STOR test.txt"
def test_log_artifact_multiple_calls(ftp_mock, tmp_path):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
d = tmp_path.joinpath("data")
d.mkdir()
file1 = d.joinpath("test1.txt")
file1.write_text("hello world!")
fpath1 = d.joinpath("test1.txt")
fpath1 = str(fpath1)
file2 = d.joinpath("test2.txt")
file2.write_text("hello world!")
fpath2 = d.joinpath("test2.txt")
fpath2 = str(fpath2)
ftp_mock.cwd = MagicMock(
side_effect=[ftplib.error_perm, None, ftplib.error_perm, None, None, None]
)
repo.log_artifact(fpath1)
ftp_mock.mkd.assert_called_once_with("/some/path")
ftp_mock.cwd.assert_called_with("/some/path")
ftp_mock.storbinary.assert_called()
assert ftp_mock.storbinary.call_args_list[0][0][0] == "STOR test1.txt"
ftp_mock.reset_mock()
repo.log_artifact(fpath1, "subdir")
ftp_mock.mkd.assert_called_once_with("/some/path/subdir")
ftp_mock.cwd.assert_called_with("/some/path/subdir")
ftp_mock.storbinary.assert_called()
assert ftp_mock.storbinary.call_args_list[0][0][0] == "STOR test1.txt"
ftp_mock.reset_mock()
repo.log_artifact(fpath2)
ftp_mock.mkd.assert_not_called()
ftp_mock.cwd.assert_called_with("/some/path")
ftp_mock.storbinary.assert_called()
assert ftp_mock.storbinary.call_args_list[0][0][0] == "STOR test2.txt"
def __posixpath_parents(pathname, root):
parents = [posixpath.dirname(pathname)]
root = posixpath.normpath(root)
while parents[-1] != "/" and parents[-1] != root:
parents.append(posixpath.dirname(parents[-1]))
return parents
@pytest.mark.parametrize("artifact_path", [None, "dir", "dir1/dir2"])
def test_log_artifacts(artifact_path, ftp_mock, tmp_path):
# Setup FTP mock.
dest_path_root = "/some/path"
repo = FTPArtifactRepository("ftp://test_ftp" + dest_path_root)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
dirs_created = {dest_path_root}
files_created = set()
cwd_history = ["/"]
def mkd_mock(pathname):
abs_pathname = posixpath.join(cwd_history[-1], pathname)
if posixpath.dirname(abs_pathname) not in dirs_created:
raise ftplib.error_perm
dirs_created.add(abs_pathname)
ftp_mock.mkd = MagicMock(side_effect=mkd_mock)
def cwd_mock(pathname):
abs_pathname = posixpath.join(cwd_history[-1], pathname)
if abs_pathname not in dirs_created:
raise ftplib.error_perm
cwd_history.append(abs_pathname)
ftp_mock.cwd = MagicMock(side_effect=cwd_mock)
def storbinary_mock(cmd, _):
head, basename = cmd.split(" ", 1)
assert head == "STOR"
assert "/" not in basename
files_created.add(posixpath.join(cwd_history[-1], basename))
ftp_mock.storbinary = MagicMock(side_effect=storbinary_mock)
# Test
data = tmp_path.joinpath("data")
data.mkdir()
subd = data.joinpath("subdir")
subd.mkdir()
subd.joinpath("a.txt").write_text("A")
subd.joinpath("b.txt").write_text("B")
subd.joinpath("c.txt").write_text("C")
subd.joinpath("empty1").mkdir()
subsubd = subd.joinpath("subsubdir")
subsubd.mkdir()
subsubd.joinpath("aa.txt").write_text("AA")
subsubd.joinpath("bb.txt").write_text("BB")
subsubd.joinpath("cc.txt").write_text("CC")
subsubd.joinpath("empty2").mkdir()
dest_path = (
dest_path_root if artifact_path is None else posixpath.join(dest_path_root, artifact_path)
)
dirs_expected = {
dest_path,
posixpath.join(dest_path, "empty1"),
posixpath.join(dest_path, "subsubdir"),
posixpath.join(dest_path, "subsubdir", "empty2"),
}
files_expected = {
posixpath.join(dest_path, "a.txt"),
posixpath.join(dest_path, "b.txt"),
posixpath.join(dest_path, "c.txt"),
posixpath.join(dest_path, "subsubdir/aa.txt"),
posixpath.join(dest_path, "subsubdir/bb.txt"),
posixpath.join(dest_path, "subsubdir/cc.txt"),
}
for dirs_expected_i in dirs_expected.copy():
if dirs_expected_i != dest_path_root:
dirs_expected |= set(__posixpath_parents(dirs_expected_i, root=dest_path_root))
repo.log_artifacts(subd, artifact_path)
assert dirs_created == dirs_expected
assert files_created == files_expected
def test_download_artifacts_single(ftp_mock):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
ftp_mock.cwd = MagicMock(side_effect=ftplib.error_perm)
repo.download_artifacts("test.txt")
ftp_mock.retrbinary.assert_called_once()
assert ftp_mock.retrbinary.call_args_list[0][0][0] == "RETR /some/path/test.txt"
def test_download_artifacts(ftp_mock):
artifact_root_path = "/some/path"
repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- model
# |- model.pb
# |- empty_dir
# |- variables
# |- test.txt
dir_path = posixpath.join(artifact_root_path, "model")
# list artifacts at sub directory level
model_file_path_sub = "model.pb"
model_file_path_full = posixpath.join(dir_path, model_file_path_sub)
empty_dir_name = "empty_dir"
empty_dir_path = posixpath.join(dir_path, empty_dir_name)
subdir_name = "variables"
subdir_path_full = posixpath.join(dir_path, subdir_name)
subfile_name = "test.txt"
subfile_path_full = posixpath.join(artifact_root_path, subdir_path_full, subfile_name)
is_dir_mapping = {
dir_path: True,
empty_dir_path: True,
model_file_path_full: False,
subdir_path_full: True,
subfile_path_full: False,
}
is_dir_call_args = [
dir_path,
model_file_path_full,
empty_dir_path,
subdir_path_full,
model_file_path_full,
subdir_path_full,
subfile_path_full,
subfile_path_full,
]
def cwd_side_effect(call_arg):
if not is_dir_mapping[call_arg]:
raise ftplib.error_perm
ftp_mock.cwd = MagicMock(side_effect=cwd_side_effect)
def nlst_side_effect(call_arg):
if call_arg == dir_path:
return [model_file_path_sub, subdir_name, empty_dir_name]
elif call_arg == subdir_path_full:
return [subfile_name]
elif call_arg == empty_dir_path:
return []
else:
raise Exception(f"should never call nlst for non-directories {call_arg}")
ftp_mock.nlst = MagicMock(side_effect=nlst_side_effect)
repo.download_artifacts("model")
cwd_call_args = [arg_entry[0][0] for arg_entry in ftp_mock.cwd.call_args_list]
assert set(cwd_call_args) == set(is_dir_call_args)
assert ftp_mock.nlst.call_count == 3
assert ftp_mock.retrbinary.call_args_list[0][0][0] == "RETR " + model_file_path_full
assert ftp_mock.retrbinary.call_args_list[1][0][0] == "RETR " + subfile_path_full
def test_log_artifact_reuse_ftp_client(ftp_mock, tmp_path):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
d = tmp_path.joinpath("data")
d.mkdir()
file = d.joinpath("test.txt")
file.write_text("hello world!")
fpath = str(file)
repo.log_artifact(fpath)
repo.log_artifact(fpath, "subdir1/subdir2")
repo.log_artifact(fpath, "subdir3")
assert repo.get_ftp_client.call_count == 3
|
GHSA-hh8p-p8mp-gqhm
|
tensorflow/python/ops/bincount_ops_test.py
|
@@ -25,7 +25,9 @@
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.framework import test_util
from tensorflow.python.ops import bincount_ops
+from tensorflow.python.ops import gen_count_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
@@ -834,5 +836,121 @@ def test_ragged_input_different_shape_fails(self):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
+@test_util.run_all_in_graph_and_eager_modes
+@test_util.disable_tfrt
+class RawOpsTest(test.TestCase, parameterized.TestCase):
+
+ def testSparseCountSparseOutputBadIndicesShape(self):
+ indices = [[[0], [0]], [[0], [1]], [[1], [0]], [[1], [2]]]
+ values = [1, 1, 1, 10]
+ weights = [1, 2, 4, 6]
+ dense_shape = [2, 3]
+ with self.assertRaisesRegex(errors.InvalidArgumentError,
+ "Input indices must be a 2-dimensional tensor"):
+ self.evaluate(
+ gen_count_ops.SparseCountSparseOutput(
+ indices=indices,
+ values=values,
+ dense_shape=dense_shape,
+ weights=weights,
+ binary_output=False))
+
+ def testSparseCountSparseOutputBadWeightsShape(self):
+ indices = [[0, 0], [0, 1], [1, 0], [1, 2]]
+ values = [1, 1, 1, 10]
+ weights = [1, 2, 4]
+ dense_shape = [2, 3]
+ with self.assertRaisesRegex(errors.InvalidArgumentError,
+ "Weights and values must have the same shape"):
+ self.evaluate(
+ gen_count_ops.SparseCountSparseOutput(
+ indices=indices,
+ values=values,
+ dense_shape=dense_shape,
+ weights=weights,
+ binary_output=False))
+
+ def testSparseCountSparseOutputBadNumberOfValues(self):
+ indices = [[0, 0], [0, 1], [1, 0]]
+ values = [1, 1, 1, 10]
+ weights = [1, 2, 4, 6]
+ dense_shape = [2, 3]
+ with self.assertRaisesRegex(
+ errors.InvalidArgumentError,
+ "Number of values must match first dimension of indices"):
+ self.evaluate(
+ gen_count_ops.SparseCountSparseOutput(
+ indices=indices,
+ values=values,
+ dense_shape=dense_shape,
+ weights=weights,
+ binary_output=False))
+
+ def testRaggedCountSparseOutput(self):
+ splits = [0, 4, 7]
+ values = [1, 1, 2, 1, 2, 10, 5]
+ weights = [1, 2, 3, 4, 5, 6, 7]
+ output_indices, output_values, output_shape = self.evaluate(
+ gen_count_ops.RaggedCountSparseOutput(
+ splits=splits, values=values, weights=weights, binary_output=False))
+ self.assertAllEqual([[0, 1], [0, 2], [1, 2], [1, 5], [1, 10]],
+ output_indices)
+ self.assertAllEqual([7, 3, 5, 7, 6], output_values)
+ self.assertAllEqual([2, 11], output_shape)
+
+ def testRaggedCountSparseOutputBadWeightsShape(self):
+ splits = [0, 4, 7]
+ values = [1, 1, 2, 1, 2, 10, 5]
+ weights = [1, 2, 3, 4, 5, 6]
+ with self.assertRaisesRegex(errors.InvalidArgumentError,
+ "Weights and values must have the same shape"):
+ self.evaluate(
+ gen_count_ops.RaggedCountSparseOutput(
+ splits=splits,
+ values=values,
+ weights=weights,
+ binary_output=False))
+
+ def testRaggedCountSparseOutputEmptySplits(self):
+ splits = []
+ values = [1, 1, 2, 1, 2, 10, 5]
+ weights = [1, 2, 3, 4, 5, 6, 7]
+ with self.assertRaisesRegex(
+ errors.InvalidArgumentError,
+ "Must provide at least 2 elements for the splits argument"):
+ self.evaluate(
+ gen_count_ops.RaggedCountSparseOutput(
+ splits=splits,
+ values=values,
+ weights=weights,
+ binary_output=False))
+
+ def testRaggedCountSparseOutputBadSplitsStart(self):
+ splits = [1, 7]
+ values = [1, 1, 2, 1, 2, 10, 5]
+ weights = [1, 2, 3, 4, 5, 6, 7]
+ with self.assertRaisesRegex(errors.InvalidArgumentError,
+ "Splits must start with 0"):
+ self.evaluate(
+ gen_count_ops.RaggedCountSparseOutput(
+ splits=splits,
+ values=values,
+ weights=weights,
+ binary_output=False))
+
+ def testRaggedCountSparseOutputBadSplitsEnd(self):
+ splits = [0, 5]
+ values = [1, 1, 2, 1, 2, 10, 5]
+ weights = [1, 2, 3, 4, 5, 6, 7]
+ with self.assertRaisesRegex(errors.InvalidArgumentError,
+ "Splits must end with the number of values"):
+ self.evaluate(
+ gen_count_ops.RaggedCountSparseOutput(
+ splits=splits,
+ values=values,
+ weights=weights,
+ binary_output=False))
+
+
if __name__ == "__main__":
test.main()
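The error strings asserted in these new tests spell out the shape invariants the count ops are expected to enforce before touching the data: indices must be rank-2, the number of values must match the first dimension of indices, weights must match the values shape, and ragged splits must have at least two elements, start at 0 and end at the number of values. A rough pure-Python mirror of the sparse-input checks, for orientation only; validate_sparse_count_inputs is an invented name, not a TensorFlow API.
import numpy as np
def validate_sparse_count_inputs(indices, values, dense_shape, weights):
    # Shape checks analogous to those the tests above expect the op to enforce.
    indices = np.asarray(indices)
    values = np.asarray(values)
    weights = np.asarray(weights)
    if indices.ndim != 2:
        raise ValueError("Input indices must be a 2-dimensional tensor")
    if values.shape[0] != indices.shape[0]:
        raise ValueError("Number of values must match first dimension of indices")
    if weights.shape != values.shape:
        raise ValueError("Weights and values must have the same shape")
    # dense_shape rank/consistency checks omitted for brevity.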
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bincount ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import bincount_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
class TestSparseCount(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "_no_maxlength",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [2, 6]
}, {
"testcase_name": "_maxlength",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [1, 1, 1, 1, 2],
"expected_shape": [2, 7]
}, {
"testcase_name": "_minlength",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [2, 9]
}, {
"testcase_name": "_minlength_larger_values",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [2, 8]
}, {
"testcase_name": "_no_maxlength_binary",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [2, 6],
"binary_output": True,
}, {
"testcase_name": "_maxlength_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [2, 7],
"binary_output": True,
}, {
"testcase_name": "_minlength_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [2, 9],
"binary_output": True,
}, {
"testcase_name": "_minlength_larger_values_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [2, 8],
"binary_output": True,
}, {
"testcase_name": "_no_maxlength_weights",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [2, 1, 0.5, 9, 3],
"expected_shape": [2, 6],
"weights": [[0.5, 1, 2], [3, 4, 5]]
}, {
"testcase_name": "_maxlength_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [2, 1, 0.5, 3, 9],
"expected_shape": [2, 7],
"weights": [[0.5, 1, 2, 11], [7, 3, 4, 5]]
}, {
"testcase_name": "_minlength_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [2, 1, 0.5, 3, 5, 13, 4],
"expected_shape": [2, 9],
"weights": [[0.5, 1, 2, 3], [4, 5, 6, 7]]
}, {
"testcase_name": "_minlength_larger_values_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [2, 1, 0.5, 3, 5, 13, 4],
"expected_shape": [2, 8],
"weights": [[0.5, 1, 2, 3], [4, 5, 6, 7]]
}, {
"testcase_name": "_1d",
"x": np.array([3, 2, 1, 1], dtype=np.int32),
"expected_indices": [[1], [2], [3]],
"expected_values": [2, 1, 1],
"expected_shape": [4]
}, {
"testcase_name": "_all_axes",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[1], [2], [3], [4], [5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [6],
"axis": None
})
def test_dense_input(self,
x,
expected_indices,
expected_values,
expected_shape,
minlength=None,
maxlength=None,
binary_output=False,
weights=None,
axis=-1):
y = bincount_ops.sparse_bincount(
x,
weights=weights,
minlength=minlength,
maxlength=maxlength,
binary_output=binary_output,
axis=axis)
self.assertAllEqual(expected_indices, y.indices)
self.assertAllEqual(expected_values, y.values)
self.assertAllEqual(expected_shape, y.dense_shape)
@parameterized.named_parameters(
{
"testcase_name":
"_no_maxlength",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [3, 6],
},
{
"testcase_name":
"_maxlength",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [3, 7],
"maxlength":
7,
},
{
"testcase_name":
"_minlength",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [3, 9],
"minlength":
9,
},
{
"testcase_name":
"_minlength_larger_values",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [3, 8],
"minlength":
3,
},
{
"testcase_name":
"_no_maxlength_binary",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1],
"expected_shape": [3, 6],
"binary_output":
True,
},
{
"testcase_name":
"_maxlength_binary",
"x":
np.array([[3, 0, 1, 0], [0, 0, 7, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1],
"expected_shape": [3, 7],
"maxlength":
7,
"binary_output":
True,
},
{
"testcase_name":
"_minlength_binary",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [3, 9],
"minlength":
9,
"binary_output":
True,
},
{
"testcase_name":
"_minlength_larger_values_binary",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [3, 8],
"minlength":
3,
"binary_output":
True,
},
{
"testcase_name":
"_no_maxlength_weights",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [2, 6, 7, 10],
"expected_shape": [3, 6],
"weights":
np.array([[6, 0, 2, 0], [0, 0, 0, 0], [10, 0, 3.5, 3.5]]),
},
{
"testcase_name":
"_maxlength_weights",
"x":
np.array([[3, 0, 1, 0], [0, 0, 7, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [2, 6, 7, 10],
"expected_shape": [3, 7],
"maxlength":
7,
"weights":
np.array([[6, 0, 2, 0], [0, 0, 14, 0], [10, 0, 3.5, 3.5]]),
},
{
"testcase_name":
"_minlength_weights",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [2, 6, 14, 6.5, 10],
"expected_shape": [3, 9],
"minlength":
9,
"weights":
np.array([[6, 0, 2, 0], [14, 0, 0, 0], [10, 0, 3, 3.5]]),
},
{
"testcase_name":
"_minlength_larger_values_weights",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [2, 6, 14, 6.5, 10],
"expected_shape": [3, 8],
"minlength":
3,
"weights":
np.array([[6, 0, 2, 0], [14, 0, 0, 0], [10, 0, 3, 3.5]]),
},
{
"testcase_name": "_1d",
"x": np.array([3, 0, 1, 1], dtype=np.int32),
"expected_indices": [[1], [3]],
"expected_values": [2, 1],
"expected_shape": [4],
},
{
"testcase_name":
"_all_axes",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[1], [3], [4], [5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [6],
"axis":
None,
},
)
def test_sparse_input(self,
x,
expected_indices,
expected_values,
expected_shape,
maxlength=None,
minlength=None,
binary_output=False,
weights=None,
axis=-1):
x_sparse = sparse_ops.from_dense(x)
w_sparse = sparse_ops.from_dense(weights) if weights is not None else None
y = bincount_ops.sparse_bincount(
x_sparse,
weights=w_sparse,
minlength=minlength,
maxlength=maxlength,
binary_output=binary_output,
axis=axis)
self.assertAllEqual(expected_indices, y.indices)
self.assertAllEqual(expected_values, y.values)
self.assertAllEqual(expected_shape, y.dense_shape)
@parameterized.named_parameters(
{
"testcase_name": "_no_maxlength",
"x": [[], [], [3, 0, 1], [], [5, 0, 4, 4]],
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [1, 1, 1, 1, 2, 1],
"expected_shape": [5, 6],
},
{
"testcase_name": "_maxlength",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"maxlength": 7,
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [1, 1, 1, 1, 2, 1],
"expected_shape": [5, 7],
},
{
"testcase_name": "_minlength",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 9,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [5, 9],
},
{
"testcase_name": "_minlength_larger_values",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 3,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [5, 8],
},
{
"testcase_name": "_no_maxlength_binary",
"x": [[], [], [3, 0, 1], [], [5, 0, 4, 4]],
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [1, 1, 1, 1, 1, 1],
"expected_shape": [5, 6],
"binary_output": True,
},
{
"testcase_name": "_maxlength_binary",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"maxlength": 7,
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [1, 1, 1, 1, 1, 1],
"expected_shape": [5, 7],
"binary_output": True,
},
{
"testcase_name": "_minlength_binary",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 9,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [5, 9],
"binary_output": True,
},
{
"testcase_name": "_minlength_larger_values_binary",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 3,
"binary_output": True,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [5, 8],
},
{
"testcase_name": "_no_maxlength_weights",
"x": [[], [], [3, 0, 1], [], [5, 0, 4, 4]],
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [0.5, 2, 6, 0.25, 8, 10],
"expected_shape": [5, 6],
"weights": [[], [], [6, 0.5, 2], [], [10, 0.25, 5, 3]],
},
{
"testcase_name": "_maxlength_weights",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"maxlength": 7,
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [0.5, 2, 6, 0.25, 8, 10],
"expected_shape": [5, 7],
"weights": [[], [], [6, 0.5, 2], [14], [10, 0.25, 5, 3]],
},
{
"testcase_name": "_minlength_weights",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 9,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [0.5, 2, 6, 14, 0.25, 8, 10],
"expected_shape": [5, 9],
"weights": [[], [], [6, 0.5, 2], [14], [10, 0.25, 5, 3]],
},
{
"testcase_name": "_minlength_larger_values_weights",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 3,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [0.5, 2, 6, 14, 0.25, 8, 10],
"expected_shape": [5, 8],
"weights": [[], [], [6, 0.5, 2], [14], [10, 0.25, 5, 3]],
},
{
"testcase_name": "_1d",
"x": [3, 0, 1, 1],
"expected_indices": [[0], [1], [3]],
"expected_values": [1, 2, 1],
"expected_shape": [4],
},
{
"testcase_name": "_all_axes",
"x": [[], [], [3, 0, 1], [], [5, 0, 4, 4]],
"expected_indices": [[0], [1], [3], [4], [5]],
"expected_values": [2, 1, 1, 2, 1],
"expected_shape": [6],
"axis": None,
},
)
def test_ragged_input(self,
x,
expected_indices,
expected_values,
expected_shape,
maxlength=None,
minlength=None,
binary_output=False,
weights=None,
axis=-1):
x_ragged = ragged_factory_ops.constant(x)
w = ragged_factory_ops.constant(weights) if weights is not None else None
y = bincount_ops.sparse_bincount(
x_ragged,
weights=w,
minlength=minlength,
maxlength=maxlength,
binary_output=binary_output,
axis=axis)
self.assertAllEqual(expected_indices, y.indices)
self.assertAllEqual(expected_values, y.values)
self.assertAllEqual(expected_shape, y.dense_shape)
class TestDenseBincount(test.TestCase, parameterized.TestCase):
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_all_count(self, dtype):
np.random.seed(42)
num_rows = 128
size = 1000
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_indices = np.concatenate([inp_indices, np.zeros((n_elems, 1))], axis=1)
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
sparse_inp = sparse_tensor.SparseTensor(inp_indices, inp_vals,
[num_rows, 1])
np_out = np.bincount(inp_vals, minlength=size)
self.assertAllEqual(
np_out, self.evaluate(bincount_ops.bincount(sparse_inp, axis=0)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_all_count_with_weights(self, dtype):
np.random.seed(42)
num_rows = 128
size = 1000
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_indices = np.concatenate([inp_indices, np.zeros((n_elems, 1))], axis=1)
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
sparse_inp = sparse_tensor.SparseTensor(inp_indices, inp_vals,
[num_rows, 1])
weight_vals = np.random.random((n_elems,))
sparse_weights = sparse_tensor.SparseTensor(inp_indices, weight_vals,
[num_rows, 1])
np_out = np.bincount(inp_vals, minlength=size, weights=weight_vals)
self.assertAllEqual(
np_out,
self.evaluate(bincount_ops.bincount(
sparse_inp, sparse_weights, axis=0)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_all_binary(self, dtype):
np.random.seed(42)
num_rows = 128
size = 10
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_indices = np.concatenate([inp_indices, np.zeros((n_elems, 1))], axis=1)
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
sparse_inp = sparse_tensor.SparseTensor(inp_indices, inp_vals,
[num_rows, 1])
np_out = np.ones((size,))
self.assertAllEqual(
np_out,
self.evaluate(bincount_ops.bincount(sparse_inp, binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_col_reduce_count(self, dtype):
num_rows = 128
num_cols = 27
size = 100
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate(
[np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
axis=0), (num_rows, size))
# from_dense will filter out 0s.
inp = inp + 1
# from_dense will cause OOM in GPU.
with ops.device("/CPU:0"):
inp_sparse = sparse_ops.from_dense(inp)
inp_sparse = sparse_tensor.SparseTensor(inp_sparse.indices,
inp_sparse.values - 1,
inp_sparse.dense_shape)
self.assertAllEqual(
np_out, self.evaluate(bincount_ops.bincount(arr=inp_sparse, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_col_reduce_binary(self, dtype):
num_rows = 128
num_cols = 27
size = 100
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate([
np.where(np.bincount(inp[j, :], minlength=size) > 0, 1, 0)
for j in range(num_rows)
],
axis=0), (num_rows, size))
# from_dense will filter out 0s.
inp = inp + 1
# from_dense will cause OOM in GPU.
with ops.device("/CPU:0"):
inp_sparse = sparse_ops.from_dense(inp)
inp_sparse = sparse_tensor.SparseTensor(inp_sparse.indices,
inp_sparse.values - 1,
inp_sparse.dense_shape)
self.assertAllEqual(
np_out,
self.evaluate(
bincount_ops.bincount(arr=inp_sparse, axis=-1, binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]],
dtype)
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 2, 1]]
# pyformat: enable
self.assertAllEqual(expected_output,
self.evaluate(bincount_ops.bincount(arr=x, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_binary(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 1]]
# pyformat: enable
self.assertAllEqual(
expected_output,
self.evaluate(
bincount_ops.bincount(arr=x, axis=-1, binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_with_weights(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
weights = ragged_factory_ops.constant([[], [], [.1, .2, .3], [],
[.2, .5, .6, .3]])
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[.2, .3, 0, .1, 0, 0],
[0, 0, 0, 0, 0, 0],
[.5, 0, 0, 0, .9, .2]]
# pyformat: enable
self.assertAllClose(
expected_output,
self.evaluate(bincount_ops.bincount(arr=x, weights=weights, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_np(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate(
[np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
self.assertAllEqual(
np_out,
self.evaluate(bincount_ops.bincount(arr=x, minlength=size, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_np_with_weights(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_weight = np.random.random((num_rows, num_cols))
np_out = np.reshape(
np.concatenate([
np.bincount(inp[j, :], weights=np_weight[j, :], minlength=size)
for j in range(num_rows)
],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
weights = ragged_tensor.RaggedTensor.from_tensor(np_weight)
self.assertAllEqual(
np_out,
self.evaluate(
bincount_ops.bincount(
arr=x, weights=weights, minlength=size, axis=-1)))
class TestSparseCountFailureModes(test.TestCase):
def test_dense_input_sparse_weights_fails(self):
x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
weights = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesRegex(ValueError, "must be a tf.Tensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_dense_input_ragged_weights_fails(self):
x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
weights = ragged_factory_ops.constant([[6, 0.5, 2], [14], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(ValueError, "must be a tf.Tensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_dense_input_wrong_shape_fails(self):
x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
weights = np.array([[3, 2], [5, 4], [4, 3]])
# Note: Eager mode and graph mode throw different errors here. Graph mode
# will fail with a ValueError from the shape checking logic, while Eager
# will fail with an InvalidArgumentError from the kernel itself.
if context.executing_eagerly():
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same shape"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
else:
with self.assertRaisesRegex(ValueError, "both shapes must be equal"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_dense_weights_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_ragged_weights_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = ragged_factory_ops.constant([[6, 0.5, 2], [14], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_wrong_indices_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = sparse_ops.from_dense(
np.array([[3, 1, 0, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same indices"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_too_many_indices_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = sparse_ops.from_dense(
np.array([[3, 1, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Incompatible shapes"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_wrong_shape_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4], [0, 0, 0, 0]],
dtype=np.int32))
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same dense shape"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_dense_weights_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_sparse_weights_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_different_shape_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = ragged_factory_ops.constant([[6, 0.5, 2], [], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same row splits"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
if __name__ == "__main__":
test.main()
|
PYSEC-2020-315
|
tests/parser/functions/test_interfaces.py
|
@@ -6,7 +6,7 @@
from vyper.builtin_interfaces import ERC20, ERC721
from vyper.cli.utils import extract_file_interface_imports
from vyper.compiler import compile_code, compile_codes
-from vyper.exceptions import InterfaceViolation, StructureException
+from vyper.exceptions import ArgumentException, InterfaceViolation, StructureException
def test_basic_extract_interface():
@@ -308,6 +308,170 @@ def test():
assert erc20.balanceOf(sender) == 1000
+# test data returned from external interface gets clamped
[email protected]("typ", ("int128", "uint8"))
+def test_external_interface_int_clampers(get_contract, assert_tx_failed, typ):
+ external_contract = f"""
+@external
+def ok() -> {typ}:
+ return 1
+
+@external
+def should_fail() -> int256:
+ return -2**255 # OOB for all int/uint types with less than 256 bits
+ """
+
+ code = f"""
+interface BadContract:
+ def ok() -> {typ}: view
+ def should_fail() -> {typ}: view
+
+foo: BadContract
+
+@external
+def __init__(addr: BadContract):
+ self.foo = addr
+
+
+@external
+def test_ok() -> {typ}:
+ return self.foo.ok()
+
+@external
+def test_fail() -> {typ}:
+ return self.foo.should_fail()
+
+@external
+def test_fail2() -> {typ}:
+ x: {typ} = self.foo.should_fail()
+ return x
+
+@external
+def test_fail3() -> int256:
+ return convert(self.foo.should_fail(), int256)
+ """
+
+ bad_c = get_contract(external_contract)
+ c = get_contract(
+ code,
+ bad_c.address,
+ interface_codes={"BadCode": {"type": "vyper", "code": external_contract}},
+ )
+ assert bad_c.ok() == 1
+ assert bad_c.should_fail() == -(2 ** 255)
+
+ assert c.test_ok() == 1
+ assert_tx_failed(lambda: c.test_fail())
+ assert_tx_failed(lambda: c.test_fail2())
+ assert_tx_failed(lambda: c.test_fail3())
+
+
+# test data returned from external interface gets clamped
+def test_external_interface_bytes_clampers(get_contract, assert_tx_failed):
+ external_contract = """
+@external
+def ok() -> Bytes[2]:
+ return b"12"
+
+@external
+def should_fail() -> Bytes[3]:
+ return b"123"
+ """
+
+ code = """
+interface BadContract:
+ def ok() -> Bytes[2]: view
+ def should_fail() -> Bytes[2]: view
+
+foo: BadContract
+
+@external
+def __init__(addr: BadContract):
+ self.foo = addr
+
+
+@external
+def test_ok() -> Bytes[2]:
+ return self.foo.ok()
+
+@external
+def test_fail() -> Bytes[3]:
+ return self.foo.should_fail()
+ """
+
+ bad_c = get_contract(external_contract)
+ c = get_contract(code, bad_c.address)
+ assert bad_c.ok() == b"12"
+ assert bad_c.should_fail() == b"123"
+
+ assert c.test_ok() == b"12"
+ assert_tx_failed(lambda: c.test_fail())
+
+
+# test data returned from external interface gets clamped
+def test_json_abi_bytes_clampers(get_contract, assert_tx_failed, assert_compile_failed):
+ external_contract = """
+@external
+def returns_Bytes3() -> Bytes[3]:
+ return b"123"
+ """
+
+ should_not_compile = """
+import BadJSONInterface as BadJSONInterface
+@external
+def foo(x: BadJSONInterface) -> Bytes[2]:
+ return slice(x.returns_Bytes3(), 0, 2)
+ """
+
+ code = """
+import BadJSONInterface as BadJSONInterface
+
+foo: BadJSONInterface
+
+@external
+def __init__(addr: BadJSONInterface):
+ self.foo = addr
+
+
+@external
+def test_fail1() -> Bytes[2]:
+ # should compile, but raise runtime exception
+ return self.foo.returns_Bytes3()
+
+@external
+def test_fail2() -> Bytes[2]:
+ # should compile, but raise runtime exception
+ x: Bytes[2] = self.foo.returns_Bytes3()
+ return x
+
+@external
+def test_fail3() -> Bytes[3]:
+ # should revert - returns_Bytes3 is inferred to have return type Bytes[2]
+ # (because test_fail3 comes after test_fail1)
+ return self.foo.returns_Bytes3()
+
+ """
+
+ bad_c = get_contract(external_contract)
+ bad_c_interface = {
+ "BadJSONInterface": {
+ "type": "json",
+ "code": compile_code(external_contract, ["abi"])["abi"],
+ }
+ }
+
+ assert_compile_failed(
+ lambda: get_contract(should_not_compile, interface_codes=bad_c_interface), ArgumentException
+ )
+
+ c = get_contract(code, bad_c.address, interface_codes=bad_c_interface)
+ assert bad_c.returns_Bytes3() == b"123"
+
+ assert_tx_failed(lambda: c.test_fail1())
+ assert_tx_failed(lambda: c.test_fail2())
+ assert_tx_failed(lambda: c.test_fail3())
+
+
def test_units_interface(w3, get_contract):
code = """
import balanceof as BalanceOf
|
from decimal import Decimal
import pytest
from vyper.ast.signatures.interface import extract_sigs
from vyper.builtin_interfaces import ERC20, ERC721
from vyper.cli.utils import extract_file_interface_imports
from vyper.compiler import compile_code, compile_codes
from vyper.exceptions import InterfaceViolation, StructureException
def test_basic_extract_interface():
code = """
# Events
event Transfer:
_from: address
_to: address
_value: uint256
# Functions
@view
@external
def allowance(_owner: address, _spender: address) -> (uint256, uint256):
return 1, 2
"""
out = compile_code(code, ["interface"])
out = out["interface"]
code_pass = "\n".join(code.split("\n")[:-2] + [" pass"]) # replace with a pass statement.
assert code_pass.strip() == out.strip()
def test_basic_extract_external_interface():
code = """
@view
@external
def allowance(_owner: address, _spender: address) -> (uint256, uint256):
return 1, 2
@external
def test(_owner: address):
pass
@view
@internal
def _prive(_owner: address, _spender: address) -> (uint256, uint256):
return 1, 2
"""
interface = """
# External Interfaces
interface One:
def allowance(_owner: address, _spender: address) -> (uint256, uint256): view
def test(_owner: address): nonpayable
"""
out = compile_codes({"one.vy": code}, ["external_interface"])["one.vy"]
out = out["external_interface"]
assert interface.strip() == out.strip()
def test_basic_interface_implements(assert_compile_failed):
code = """
from vyper.interfaces import ERC20
implements: ERC20
@external
def test() -> bool:
return True
"""
assert_compile_failed(lambda: compile_code(code), InterfaceViolation)
def test_builtin_interfaces_parse():
assert len(extract_sigs({"type": "vyper", "code": ERC20.interface_code})) == 6
assert len(extract_sigs({"type": "vyper", "code": ERC721.interface_code})) == 9
def test_extract_sigs_ignores_imports():
interface_code = """
{}
@external
def foo() -> uint256:
pass
"""
base = extract_sigs({"type": "vyper", "code": interface_code.format("")})
for stmt in ("import x as x", "from x import y"):
sigs = extract_sigs({"type": "vyper", "code": interface_code.format(stmt)})
assert [type(i) for i in base] == [type(i) for i in sigs]
def test_external_interface_parsing(assert_compile_failed):
interface_code = """
@external
def foo() -> uint256:
pass
@external
def bar() -> uint256:
pass
"""
interface_codes = {"FooBarInterface": {"type": "vyper", "code": interface_code}}
code = """
import a as FooBarInterface
implements: FooBarInterface
@external
def foo() -> uint256:
return 1
@external
def bar() -> uint256:
return 2
"""
assert compile_code(code, interface_codes=interface_codes)
not_implemented_code = """
import a as FooBarInterface
implements: FooBarInterface
@external
def foo() -> uint256:
return 1
"""
assert_compile_failed(
lambda: compile_code(not_implemented_code, interface_codes=interface_codes),
InterfaceViolation,
)
def test_missing_event(assert_compile_failed):
interface_code = """
event Foo:
a: uint256
"""
interface_codes = {"FooBarInterface": {"type": "vyper", "code": interface_code}}
not_implemented_code = """
import a as FooBarInterface
implements: FooBarInterface
@external
def bar() -> uint256:
return 1
"""
assert_compile_failed(
lambda: compile_code(not_implemented_code, interface_codes=interface_codes),
InterfaceViolation,
)
def test_malformed_event(assert_compile_failed):
interface_code = """
event Foo:
a: uint256
"""
interface_codes = {"FooBarInterface": {"type": "vyper", "code": interface_code}}
not_implemented_code = """
import a as FooBarInterface
implements: FooBarInterface
event Foo:
a: int128
@external
def bar() -> uint256:
return 1
"""
assert_compile_failed(
lambda: compile_code(not_implemented_code, interface_codes=interface_codes),
InterfaceViolation,
)
VALID_IMPORT_CODE = [
# import statement, import path without suffix
("import a as Foo", "a"),
("import b.a as Foo", "b/a"),
("import Foo as Foo", "Foo"),
("from a import Foo", "a/Foo"),
("from b.a import Foo", "b/a/Foo"),
("from .a import Foo", "./a/Foo"),
("from ..a import Foo", "../a/Foo"),
]
@pytest.mark.parametrize("code", VALID_IMPORT_CODE)
def test_extract_file_interface_imports(code):
assert extract_file_interface_imports(code[0]) == {"Foo": code[1]}
BAD_IMPORT_CODE = [
"import a", # must alias absolute imports
"import a as A\nimport a as A", # namespace collisions
"from b import a\nfrom a import a",
"from . import a\nimport a as a",
"import a as a\nfrom . import a",
]
@pytest.mark.parametrize("code", BAD_IMPORT_CODE)
def test_extract_file_interface_imports_raises(code, assert_compile_failed):
assert_compile_failed(lambda: extract_file_interface_imports(code), StructureException)
def test_external_call_to_interface(w3, get_contract):
token_code = """
balanceOf: public(HashMap[address, uint256])
@external
def transfer(to: address, _value: uint256):
self.balanceOf[to] += _value
"""
code = """
import one as TokenCode
interface EPI:
def test() -> uint256: view
token_address: TokenCode
@external
def __init__(_token_address: address):
self.token_address = TokenCode(_token_address)
@external
def test():
self.token_address.transfer(msg.sender, 1000)
"""
erc20 = get_contract(token_code)
test_c = get_contract(
code, *[erc20.address], interface_codes={"TokenCode": {"type": "vyper", "code": token_code}}
)
sender = w3.eth.accounts[0]
assert erc20.balanceOf(sender) == 0
test_c.test(transact={})
assert erc20.balanceOf(sender) == 1000
def test_external_call_to_builtin_interface(w3, get_contract):
token_code = """
balanceOf: public(HashMap[address, uint256])
@external
def transfer(to: address, _value: uint256) -> bool:
self.balanceOf[to] += _value
return True
"""
code = """
from vyper.interfaces import ERC20
token_address: ERC20
@external
def __init__(_token_address: address):
self.token_address = ERC20(_token_address)
@external
def test():
self.token_address.transfer(msg.sender, 1000)
"""
erc20 = get_contract(token_code)
test_c = get_contract(
code, *[erc20.address], interface_codes={"TokenCode": {"type": "vyper", "code": token_code}}
)
sender = w3.eth.accounts[0]
assert erc20.balanceOf(sender) == 0
test_c.test(transact={})
assert erc20.balanceOf(sender) == 1000
def test_units_interface(w3, get_contract):
code = """
import balanceof as BalanceOf
implements: BalanceOf
@external
@view
def balanceOf(owner: address) -> uint256:
return as_wei_value(1, "ether")
"""
interface_code = """
@external
@view
def balanceOf(owner: address) -> uint256:
pass
"""
interface_codes = {"BalanceOf": {"type": "vyper", "code": interface_code}}
c = get_contract(code, interface_codes=interface_codes)
assert c.balanceOf(w3.eth.accounts[0]) == w3.toWei(1, "ether")
def test_local_and_global_interface_namespaces():
interface_code = """
@external
def foo() -> uint256:
pass
"""
global_interface_codes = {
"FooInterface": {"type": "vyper", "code": interface_code},
"BarInterface": {"type": "vyper", "code": interface_code},
}
local_interface_codes = {
"FooContract": {"FooInterface": {"type": "vyper", "code": interface_code}},
"BarContract": {"BarInterface": {"type": "vyper", "code": interface_code}},
}
code = """
import a as {0}
implements: {0}
@external
def foo() -> uint256:
return 1
"""
codes = {"FooContract": code.format("FooInterface"), "BarContract": code.format("BarInterface")}
global_compiled = compile_codes(codes, interface_codes=global_interface_codes)
local_compiled = compile_codes(codes, interface_codes=local_interface_codes)
assert global_compiled == local_compiled
def test_self_interface_is_allowed(get_contract):
code = """
interface Bar:
def foo() -> uint256: view
@external
def foo() -> uint256 :
return 42
@external
def bar() -> uint256:
return Bar(self).foo()
"""
c = get_contract(code)
assert c.bar() == 42
def test_self_interface_via_storage(get_contract):
code = """
interface Bar:
def foo() -> uint256: view
bar_contract: Bar
@external
def __init__():
self.bar_contract = Bar(self)
@external
def foo() -> uint256 :
return 42
@external
def bar() -> uint256:
return self.bar_contract.foo()
"""
c = get_contract(code)
assert c.bar() == 42
def test_self_interface_via_calldata(get_contract):
code = """
interface Bar:
def foo() -> uint256: view
@external
def foo() -> uint256 :
return 42
@external
def bar(a: address) -> uint256:
return Bar(a).foo()
"""
c = get_contract(code)
assert c.bar(c.address) == 42
type_str_params = [
("int128", -33),
("uint256", 42),
("bool", True),
("address", "0x1234567890123456789012345678901234567890"),
("bytes32", b"bytes32bytes32bytes32bytes32poop"),
("decimal", Decimal("3.1337")),
("Bytes[4]", b"newp"),
("String[6]", "potato"),
]
interface_test_code = """
@external
@view
def test_json(a: {0}) -> {0}:
return a
"""
def convert_v1_abi(abi):
new_abi = []
for func_abi in abi:
if "stateMutability" in func_abi:
mutability = func_abi["stateMutability"]
del func_abi["stateMutability"]
if mutability == "payable":
func_abi["constant"] = False
func_abi["payable"] = True
elif mutability == "view":
func_abi["constant"] = True
func_abi["payable"] = False
elif mutability == "pure":
# NOTE: pure "changes" to "view"
func_abi["constant"] = True
func_abi["payable"] = False
else: # "nonpayable"
func_abi["constant"] = False
func_abi["payable"] = False
else: # Assume "nonpayable" by default
func_abi["constant"] = False
func_abi["payable"] = False
new_abi.append(func_abi)
return new_abi
@pytest.mark.parametrize("type_str", [i[0] for i in type_str_params])
def test_json_interface_implements(type_str):
code = interface_test_code.format(type_str)
abi = compile_code(code, ["abi"])["abi"]
code = f"import jsonabi as jsonabi\nimplements: jsonabi\n{code}"
compile_code(code, interface_codes={"jsonabi": {"type": "json", "code": abi}})
compile_code(code, interface_codes={"jsonabi": {"type": "json", "code": convert_v1_abi(abi)}})
@pytest.mark.parametrize("type_str,value", type_str_params)
def test_json_interface_calls(get_contract, type_str, value):
code = interface_test_code.format(type_str)
abi = compile_code(code, ["abi"])["abi"]
c1 = get_contract(code)
code = f"""
import jsonabi as jsonabi
@external
@view
def test_call(a: address, b: {type_str}) -> {type_str}:
return jsonabi(a).test_json(b)
"""
c2 = get_contract(code, interface_codes={"jsonabi": {"type": "json", "code": abi}})
assert c2.test_call(c1.address, value) == value
c3 = get_contract(
code, interface_codes={"jsonabi": {"type": "json", "code": convert_v1_abi(abi)}}
)
assert c3.test_call(c1.address, value) == value
|
PYSEC-2022-198
|
vyper/codegen/core.py
|
@@ -123,10 +123,7 @@ def _dynarray_make_setter(dst, src):
# for ABI-encoded dynamic data, we must loop to unpack, since
# the layout does not match our memory layout
- should_loop = (
- src.encoding in (Encoding.ABI, Encoding.JSON_ABI)
- and src.typ.subtype.abi_type.is_dynamic()
- )
+ should_loop = src.encoding == Encoding.ABI and src.typ.subtype.abi_type.is_dynamic()
# if the subtype is dynamic, there might be a lot of
# unused space inside of each element. for instance
@@ -379,7 +376,7 @@ def _get_element_ptr_tuplelike(parent, key):
ofst = 0 # offset from parent start
- if parent.encoding in (Encoding.ABI, Encoding.JSON_ABI):
+ if parent.encoding == Encoding.ABI:
if parent.location == STORAGE:
raise CompilerPanic("storage variables should not be abi encoded") # pragma: notest
@@ -449,7 +446,7 @@ def _get_element_ptr_array(parent, key, array_bounds_check):
# NOTE: there are optimization rules for this when ix or bound is literal
ix = IRnode.from_list([clamp_op, ix, bound], typ=ix.typ)
- if parent.encoding in (Encoding.ABI, Encoding.JSON_ABI):
+ if parent.encoding == Encoding.ABI:
if parent.location == STORAGE:
raise CompilerPanic("storage variables should not be abi encoded") # pragma: notest
@@ -703,20 +700,20 @@ def _freshname(name):
# returns True if t is ABI encoded and is a type that needs any kind of
# validation
def needs_clamp(t, encoding):
- if encoding not in (Encoding.ABI, Encoding.JSON_ABI):
+ if encoding == Encoding.VYPER:
return False
+ if encoding != Encoding.ABI:
+ raise CompilerPanic("unreachable") # pragma: notest
if isinstance(t, (ByteArrayLike, DArrayType)):
- if encoding == Encoding.JSON_ABI:
- # don't have bytestring size bound from json, don't clamp
- return False
- return True
- if isinstance(t, BaseType) and t.typ not in ("int256", "uint256", "bytes32"):
return True
+ if isinstance(t, BaseType):
+ return t.typ not in ("int256", "uint256", "bytes32")
if isinstance(t, SArrayType):
return needs_clamp(t.subtype, encoding)
if isinstance(t, TupleLike):
return any(needs_clamp(m, encoding) for m in t.tuple_members())
- return False
+
+ raise CompilerPanic("unreachable") # pragma: notest
# Create an x=y statement, where the types may be compound
|
from vyper import ast as vy_ast
from vyper.address_space import CALLDATA, DATA, IMMUTABLES, MEMORY, STORAGE
from vyper.codegen.ir_node import Encoding, IRnode
from vyper.codegen.types import (
DYNAMIC_ARRAY_OVERHEAD,
ArrayLike,
BaseType,
ByteArrayLike,
DArrayType,
MappingType,
SArrayType,
StructType,
TupleLike,
TupleType,
ceil32,
is_bytes_m_type,
is_decimal_type,
is_integer_type,
)
from vyper.evm.opcodes import version_check
from vyper.exceptions import CompilerPanic, StructureException, TypeCheckFailure, TypeMismatch
from vyper.utils import GAS_CALLDATACOPY_WORD, GAS_CODECOPY_WORD, GAS_IDENTITY, GAS_IDENTITYWORD
# propagate revert message when calls to external contracts fail
def check_external_call(call_ir):
copy_revertdata = ["returndatacopy", 0, 0, "returndatasize"]
revert = ["revert", 0, "returndatasize"]
propagate_revert_ir = ["seq", copy_revertdata, revert]
return ["if", ["iszero", call_ir], propagate_revert_ir]
# cost per byte of the identity precompile
def _identity_gas_bound(num_bytes):
return GAS_IDENTITY + GAS_IDENTITYWORD * (ceil32(num_bytes) // 32)
def _calldatacopy_gas_bound(num_bytes):
return GAS_CALLDATACOPY_WORD * ceil32(num_bytes) // 32
def _codecopy_gas_bound(num_bytes):
return GAS_CODECOPY_WORD * ceil32(num_bytes) // 32
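# Editor's illustration (assumption: the identity precompile costs 15 + 3 per
# word and the *COPY opcodes 3 per word, as in the EVM yellow paper); this is
# just the bounds above evaluated for a 100-byte copy.
def _ceil32_example(n: int) -> int:
    return (n + 31) // 32 * 32

assert 15 + 3 * (_ceil32_example(100) // 32) == 27   # identity bound for 100 bytes
assert 3 * _ceil32_example(100) // 32 == 12          # calldatacopy/codecopy bound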
# Copy byte array word-for-word (including layout)
def make_byte_array_copier(dst, src):
assert isinstance(src.typ, ByteArrayLike)
assert isinstance(dst.typ, ByteArrayLike)
if src.typ.maxlen > dst.typ.maxlen:
raise TypeMismatch(f"Cannot cast from {src.typ} to {dst.typ}")
# stricter check for zeroing a byte array.
if src.value == "~empty" and src.typ.maxlen != dst.typ.maxlen:
raise TypeMismatch(
f"Bad type for clearing bytes: expected {dst.typ} but got {src.typ}"
) # pragma: notest
if src.value == "~empty":
# set length word to 0.
return STORE(dst, 0)
with src.cache_when_complex("src") as (b1, src):
with get_bytearray_length(src).cache_when_complex("len") as (b2, len_):
max_bytes = src.typ.maxlen
ret = ["seq"]
# store length
ret.append(STORE(dst, len_))
dst = bytes_data_ptr(dst)
src = bytes_data_ptr(src)
ret.append(copy_bytes(dst, src, len_, max_bytes))
return b1.resolve(b2.resolve(ret))
def bytes_data_ptr(ptr):
if ptr.location is None:
raise CompilerPanic("tried to modify non-pointer type")
assert isinstance(ptr.typ, ByteArrayLike)
return add_ofst(ptr, ptr.location.word_scale)
def dynarray_data_ptr(ptr):
if ptr.location is None:
raise CompilerPanic("tried to modify non-pointer type")
assert isinstance(ptr.typ, DArrayType)
return add_ofst(ptr, ptr.location.word_scale)
def _dynarray_make_setter(dst, src):
assert isinstance(src.typ, DArrayType)
assert isinstance(dst.typ, DArrayType)
if src.value == "~empty":
return IRnode.from_list(STORE(dst, 0))
if src.value == "multi":
ret = ["seq"]
# handle literals
# write the length word
store_length = STORE(dst, len(src.args))
ann = None
if src.annotation is not None:
ann = f"len({src.annotation})"
store_length = IRnode.from_list(store_length, annotation=ann)
ret.append(store_length)
n_items = len(src.args)
for i in range(n_items):
k = IRnode.from_list(i, typ="uint256")
dst_i = get_element_ptr(dst, k, array_bounds_check=False)
src_i = get_element_ptr(src, k, array_bounds_check=False)
ret.append(make_setter(dst_i, src_i))
return ret
with src.cache_when_complex("darray_src") as (b1, src):
# for ABI-encoded dynamic data, we must loop to unpack, since
# the layout does not match our memory layout
should_loop = (
src.encoding in (Encoding.ABI, Encoding.JSON_ABI)
and src.typ.subtype.abi_type.is_dynamic()
)
# if the subtype is dynamic, there might be a lot of
# unused space inside of each element. for instance
# DynArray[DynArray[uint256, 100], 5] where all the child
# arrays are empty - for this case, we recursively call
# into make_setter instead of straight bytes copy
# TODO we can make this heuristic more precise, e.g.
# loop when subtype.is_dynamic AND location == storage
# OR array_size <= /bound where loop is cheaper than memcpy/
should_loop |= src.typ.subtype.abi_type.is_dynamic()
should_loop |= needs_clamp(src.typ.subtype, src.encoding)
with get_dyn_array_count(src).cache_when_complex("darray_count") as (b2, count):
ret = ["seq"]
ret.append(STORE(dst, count))
if should_loop:
i = IRnode.from_list(_freshname("copy_darray_ix"), typ="uint256")
loop_body = make_setter(
get_element_ptr(dst, i, array_bounds_check=False),
get_element_ptr(src, i, array_bounds_check=False),
)
loop_body.annotation = f"{dst}[i] = {src}[i]"
ret.append(["repeat", i, 0, count, src.typ.count, loop_body])
else:
element_size = src.typ.subtype.memory_bytes_required
# number of elements * size of element in bytes
n_bytes = _mul(count, element_size)
max_bytes = src.typ.count * element_size
src_ = dynarray_data_ptr(src)
dst_ = dynarray_data_ptr(dst)
ret.append(copy_bytes(dst_, src_, n_bytes, max_bytes))
return b1.resolve(b2.resolve(ret))
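# Editor's sketch (hypothetical helper, not in the original source): the
# loop-vs-memcpy decision computed above, restated in isolation.
def _should_loop_sketch(abi_encoded_src: bool, subtype_is_dynamic: bool,
                        subtype_needs_clamp: bool) -> bool:
    loop = abi_encoded_src and subtype_is_dynamic  # ABI layout differs, unpack per element
    loop |= subtype_is_dynamic                     # dynamic subtypes can waste space
    loop |= subtype_needs_clamp                    # elements needing validation can't be memcpy'd
    return loop

assert _should_loop_sketch(False, False, False) is False  # plain memcpy is enough
assert _should_loop_sketch(True, False, True) is True     # e.g. DynArray[uint8, N] from ABI data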
# Copy bytes
# Accepts 4 arguments:
# (i) an IR node for the start position of the source
# (ii) an IR node for the start position of the destination
# (iii) an IR node for the length (in bytes)
# (iv) a constant for the max length (in bytes)
# NOTE: may pad to ceil32 of `length`! If you ask to copy 1 byte, it may
# copy an entire (32-byte) word, depending on the copy routine chosen.
# TODO maybe always pad to ceil32, to reduce dirty bytes bugs
def copy_bytes(dst, src, length, length_bound):
annotation = f"copy_bytes from {src} to {dst}"
src = IRnode.from_list(src)
dst = IRnode.from_list(dst)
length = IRnode.from_list(length)
with src.cache_when_complex("src") as (b1, src), length.cache_when_complex(
"copy_bytes_count"
) as (b2, length), dst.cache_when_complex("dst") as (b3, dst):
# fast code for common case where num bytes is small
# TODO expand this for more cases where num words is less than ~8
if length_bound <= 32:
copy_op = STORE(dst, LOAD(src))
ret = IRnode.from_list(copy_op, annotation=annotation)
return b1.resolve(b2.resolve(b3.resolve(ret)))
if dst.location == MEMORY and src.location in (MEMORY, CALLDATA, DATA):
# special cases: batch copy to memory
# TODO: iloadbytes
if src.location == MEMORY:
copy_op = ["staticcall", "gas", 4, src, length, dst, length]
gas_bound = _identity_gas_bound(length_bound)
elif src.location == CALLDATA:
copy_op = ["calldatacopy", dst, src, length]
gas_bound = _calldatacopy_gas_bound(length_bound)
elif src.location == DATA:
copy_op = ["dloadbytes", dst, src, length]
# note: dloadbytes compiles to CODECOPY
gas_bound = _codecopy_gas_bound(length_bound)
ret = IRnode.from_list(copy_op, annotation=annotation, add_gas_estimate=gas_bound)
return b1.resolve(b2.resolve(b3.resolve(ret)))
if dst.location == IMMUTABLES and src.location in (MEMORY, DATA):
# TODO istorebytes-from-mem, istorebytes-from-calldata(?)
# compile to identity, CODECOPY respectively.
pass
# general case, copy word-for-word
# pseudocode for our approach (memory-storage as example):
# for i in range(len, bound=MAX_LEN):
# sstore(_dst + i, mload(src + i * 32))
i = IRnode.from_list(_freshname("copy_bytes_ix"), typ="uint256")
n = ["div", ["ceil32", length], 32]
n_bound = ceil32(length_bound) // 32
dst_i = add_ofst(dst, _mul(i, dst.location.word_scale))
src_i = add_ofst(src, _mul(i, src.location.word_scale))
copy_one_word = STORE(dst_i, LOAD(src_i))
main_loop = ["repeat", i, 0, n, n_bound, copy_one_word]
return b1.resolve(
b2.resolve(b3.resolve(IRnode.from_list(main_loop, annotation=annotation)))
)
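# Editor's sketch (assumption): the word-for-word loop above, expressed over
# plain byte strings instead of EVM memory; note it may copy past `length`
# up to the next 32-byte boundary, as warned in the comment on copy_bytes.
def _copy_words_sketch(dst: bytearray, src: bytes, length: int) -> None:
    n_words = (length + 31) // 32  # ceil32(length) / 32
    for i in range(n_words):
        word = src[i * 32:(i + 1) * 32].ljust(32, b"\x00")
        dst[i * 32:(i + 1) * 32] = word

buf = bytearray(64)
_copy_words_sketch(buf, b"hello", 5)
assert buf[:5] == b"hello"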
# get the number of bytes at runtime
def get_bytearray_length(arg):
typ = BaseType("uint256")
return IRnode.from_list(LOAD(arg), typ=typ)
# get the number of elements at runtime
def get_dyn_array_count(arg):
assert isinstance(arg.typ, DArrayType)
typ = BaseType("uint256")
if arg.value == "multi":
return IRnode.from_list(len(arg.args), typ=typ)
if arg.value == "~empty":
# empty(DynArray[])
return IRnode.from_list(0, typ=typ)
return IRnode.from_list(LOAD(arg), typ=typ)
def append_dyn_array(darray_node, elem_node):
assert isinstance(darray_node.typ, DArrayType)
assert darray_node.typ.count > 0, "jerk boy u r out"
ret = ["seq"]
with darray_node.cache_when_complex("darray") as (b1, darray_node):
len_ = get_dyn_array_count(darray_node)
with len_.cache_when_complex("old_darray_len") as (b2, len_):
ret.append(["assert", ["le", len_, darray_node.typ.count - 1]])
ret.append(STORE(darray_node, ["add", len_, 1]))
# NOTE: typechecks elem_node
# NOTE skip array bounds check bc we already asserted len two lines up
ret.append(
make_setter(get_element_ptr(darray_node, len_, array_bounds_check=False), elem_node)
)
return IRnode.from_list(b1.resolve(b2.resolve(ret)))
def pop_dyn_array(darray_node, return_popped_item):
assert isinstance(darray_node.typ, DArrayType)
ret = ["seq"]
with darray_node.cache_when_complex("darray") as (b1, darray_node):
old_len = ["clamp_nonzero", get_dyn_array_count(darray_node)]
new_len = IRnode.from_list(["sub", old_len, 1], typ="uint256")
with new_len.cache_when_complex("new_len") as (b2, new_len):
ret.append(STORE(darray_node, new_len))
# NOTE skip array bounds check bc we already asserted len two lines up
if return_popped_item:
popped_item = get_element_ptr(darray_node, new_len, array_bounds_check=False)
ret.append(popped_item)
typ = popped_item.typ
location = popped_item.location
encoding = popped_item.encoding
else:
typ, location, encoding = None, None, None
return IRnode.from_list(
b1.resolve(b2.resolve(ret)), typ=typ, location=location, encoding=encoding
)
def getpos(node):
return (
node.lineno,
node.col_offset,
getattr(node, "end_lineno", None),
getattr(node, "end_col_offset", None),
)
# add an offset to a pointer, keeping location and encoding info
def add_ofst(ptr, ofst):
ret = ["add", ptr, ofst]
return IRnode.from_list(ret, location=ptr.location, encoding=ptr.encoding)
# shorthand util
def _mul(x, y):
ret = ["mul", x, y]
return IRnode.from_list(ret)
# Resolve pointer locations for ABI-encoded data
def _getelemptr_abi_helper(parent, member_t, ofst, clamp=True):
member_abi_t = member_t.abi_type
# ABI encoding has length word and then pretends length is not there
# e.g. [[1,2]] is encoded as 0x01 <len> 0x20 <inner array ofst> <encode(inner array)>
# note that inner array ofst is 0x20, not 0x40.
if has_length_word(parent.typ):
parent = add_ofst(parent, parent.location.word_scale * DYNAMIC_ARRAY_OVERHEAD)
ofst_ir = add_ofst(parent, ofst)
if member_abi_t.is_dynamic():
# double dereference, according to ABI spec
# TODO optimize special case: first dynamic item
# offset is statically known.
ofst_ir = add_ofst(parent, unwrap_location(ofst_ir))
return IRnode.from_list(
ofst_ir,
typ=member_t,
location=parent.location,
encoding=parent.encoding,
annotation=f"{parent}{ofst}",
)
# TODO simplify this code, especially the ABI decoding
def _get_element_ptr_tuplelike(parent, key):
typ = parent.typ
assert isinstance(typ, TupleLike)
if isinstance(typ, StructType):
assert isinstance(key, str)
subtype = typ.members[key]
attrs = list(typ.tuple_keys())
index = attrs.index(key)
annotation = key
else:
assert isinstance(key, int)
subtype = typ.members[key]
attrs = list(range(len(typ.members)))
index = key
annotation = None
# generated by empty() + make_setter
if parent.value == "~empty":
return IRnode.from_list("~empty", typ=subtype)
if parent.value == "multi":
assert parent.encoding != Encoding.ABI, "no abi-encoded literals"
return parent.args[index]
ofst = 0 # offset from parent start
if parent.encoding in (Encoding.ABI, Encoding.JSON_ABI):
if parent.location == STORAGE:
raise CompilerPanic("storage variables should not be abi encoded") # pragma: notest
member_t = typ.members[attrs[index]]
for i in range(index):
member_abi_t = typ.members[attrs[i]].abi_type
ofst += member_abi_t.embedded_static_size()
return _getelemptr_abi_helper(parent, member_t, ofst)
if parent.location.word_addressable:
for i in range(index):
ofst += typ.members[attrs[i]].storage_size_in_words
elif parent.location.byte_addressable:
for i in range(index):
ofst += typ.members[attrs[i]].memory_bytes_required
else:
raise CompilerPanic(f"bad location {parent.location}") # pragma: notest
return IRnode.from_list(
add_ofst(parent, ofst),
typ=subtype,
location=parent.location,
encoding=parent.encoding,
annotation=annotation,
)
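# Editor's illustration (hypothetical sizes): the static offset of member i is
# the sum of the sizes of the members before it, in words for storage or bytes
# for memory, exactly as the two loops above accumulate it.
def _member_offset_sketch(member_sizes, index):
    return sum(member_sizes[:index])

assert _member_offset_sketch([32, 32, 64], 2) == 64   # third member starts at byte 64
assert _member_offset_sketch([1, 1, 2], 2) == 2       # or at word 2 in storage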
def has_length_word(typ):
return isinstance(typ, (DArrayType, ByteArrayLike))
# TODO simplify this code, especially the ABI decoding
def _get_element_ptr_array(parent, key, array_bounds_check):
assert isinstance(parent.typ, ArrayLike)
if not is_integer_type(key.typ):
raise TypeCheckFailure(f"{key.typ} used as array index")
subtype = parent.typ.subtype
if parent.value == "~empty":
if array_bounds_check:
# this case was previously missing a bounds check. codegen
# is a bit complicated when bounds check is required, so
# block it. there is no reason to index into a literal empty
# array anyways!
raise TypeCheckFailure("indexing into zero array not allowed")
return IRnode.from_list("~empty", subtype)
if parent.value == "multi":
assert isinstance(key.value, int)
return parent.args[key.value]
ix = unwrap_location(key)
if array_bounds_check:
# clamplt works, even for signed ints. since two's-complement
# is used, if the index is negative, (unsigned) LT will interpret
# it as a very large number, larger than any practical value for
# an array index, and the clamp will throw an error.
clamp_op = "uclamplt"
is_darray = isinstance(parent.typ, DArrayType)
bound = get_dyn_array_count(parent) if is_darray else parent.typ.count
# NOTE: there are optimization rules for this when ix or bound is literal
ix = IRnode.from_list([clamp_op, ix, bound], typ=ix.typ)
if parent.encoding in (Encoding.ABI, Encoding.JSON_ABI):
if parent.location == STORAGE:
raise CompilerPanic("storage variables should not be abi encoded") # pragma: notest
member_abi_t = subtype.abi_type
ofst = _mul(ix, member_abi_t.embedded_static_size())
return _getelemptr_abi_helper(parent, subtype, ofst)
if parent.location.word_addressable:
element_size = subtype.storage_size_in_words
elif parent.location.byte_addressable:
element_size = subtype.memory_bytes_required
else:
raise CompilerPanic("unreachable") # pragma: notest
ofst = _mul(ix, element_size)
if has_length_word(parent.typ):
data_ptr = add_ofst(parent, parent.location.word_scale * DYNAMIC_ARRAY_OVERHEAD)
else:
data_ptr = parent
return IRnode.from_list(add_ofst(data_ptr, ofst), typ=subtype, location=parent.location)
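# Editor's illustration of the clamp comment above: an *unsigned* less-than
# check also rejects signed negative indices, because in two's complement a
# negative word reads as an enormous unsigned number.
def _as_uint256(x: int) -> int:
    return x % (1 << 256)

assert _as_uint256(-1) == 2**256 - 1      # far beyond any real array bound
assert not (_as_uint256(-1) < 1000)       # so `uclamplt ix bound` fails for ix < 0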
def _get_element_ptr_mapping(parent, key):
assert isinstance(parent.typ, MappingType)
subtype = parent.typ.valuetype
key = unwrap_location(key)
# TODO when is key None?
if key is None or parent.location != STORAGE:
raise TypeCheckFailure(f"bad dereference on mapping {parent}[{key}]")
return IRnode.from_list(["sha3_64", parent, key], typ=subtype, location=STORAGE)
# Take a value representing a memory or storage location, and descend down to
# an element or member variable
# This is analogous (but not necessarily equivalent to) getelementptr in LLVM.
def get_element_ptr(parent, key, array_bounds_check=True):
with parent.cache_when_complex("val") as (b, parent):
typ = parent.typ
if isinstance(typ, TupleLike):
ret = _get_element_ptr_tuplelike(parent, key)
elif isinstance(typ, MappingType):
ret = _get_element_ptr_mapping(parent, key)
elif isinstance(typ, ArrayLike):
ret = _get_element_ptr_array(parent, key, array_bounds_check)
else:
raise CompilerPanic(f"get_element_ptr cannot be called on {typ}") # pragma: notest
return b.resolve(ret)
def LOAD(ptr: IRnode) -> IRnode:
if ptr.location is None:
raise CompilerPanic("cannot dereference non-pointer type")
op = ptr.location.load_op
if op is None:
raise CompilerPanic(f"unreachable {ptr.location}") # pragma: notest
return IRnode.from_list([op, ptr])
def STORE(ptr: IRnode, val: IRnode) -> IRnode:
if ptr.location is None:
raise CompilerPanic("cannot dereference non-pointer type")
op = ptr.location.store_op
if op is None:
raise CompilerPanic(f"unreachable {ptr.location}") # pragma: notest
return IRnode.from_list([op, ptr, val])
# Unwrap location
def unwrap_location(orig):
if orig.location is not None:
return IRnode.from_list(LOAD(orig), typ=orig.typ)
else:
# CMC 2022-03-24 TODO refactor so this branch can be removed
if orig.value == "~empty":
return IRnode.from_list(0, typ=orig.typ)
return orig
# utility function, constructs an IR tuple out of a list of IR nodes
def ir_tuple_from_args(args):
typ = TupleType([x.typ for x in args])
return IRnode.from_list(["multi"] + [x for x in args], typ=typ)
def _needs_external_call_wrap(ir_typ):
# for calls to ABI conforming contracts.
# according to the ABI spec, return types are ALWAYS tuples even
# if only one element is being returned.
# https://solidity.readthedocs.io/en/latest/abi-spec.html#function-selector-and-argument-encoding
# "and the return values v_1, ..., v_k of f are encoded as
#
# enc((v_1, ..., v_k))
# i.e. the values are combined into a tuple and encoded.
# "
# therefore, wrap it in a tuple if it's not already a tuple.
# for example, `bytes` is returned as abi-encoded (bytes,)
# and `(bytes,)` is returned as abi-encoded ((bytes,),)
# In general `-> X` gets returned as (X,)
# including structs. MyStruct is returned as abi-encoded (MyStruct,).
# (Sorry this is so confusing. I didn't make these rules.)
return not (isinstance(ir_typ, TupleType) and len(ir_typ.members) > 1)
def calculate_type_for_external_return(ir_typ):
if _needs_external_call_wrap(ir_typ):
return TupleType([ir_typ])
return ir_typ
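# Editor's note (illustration of the comment in _needs_external_call_wrap):
#   -> uint256            is returned ABI-encoded as (uint256,)
#   -> Bytes[32]          is returned ABI-encoded as (Bytes[32],)
#   -> (uint256, bytes32) already has two members, so it is left unwrapped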
def wrap_value_for_external_return(ir_val):
# used for LHS promotion
if _needs_external_call_wrap(ir_val.typ):
return ir_tuple_from_args([ir_val])
else:
return ir_val
def set_type_for_external_return(ir_val):
# used for RHS promotion
ir_val.typ = calculate_type_for_external_return(ir_val.typ)
# return a dummy IRnode with the given type
def dummy_node_for_type(typ):
return IRnode("fake_node", typ=typ)
def _check_assign_bytes(left, right):
if right.typ.maxlen > left.typ.maxlen:
raise TypeMismatch(f"Cannot cast from {right.typ} to {left.typ}") # pragma: notest
# stricter check for zeroing a byte array.
if right.value == "~empty" and right.typ.maxlen != left.typ.maxlen:
raise TypeMismatch(
f"Bad type for clearing bytes: expected {left.typ} but got {right.typ}"
) # pragma: notest
def _check_assign_list(left, right):
def FAIL(): # pragma: nocover
raise TypeCheckFailure(f"assigning {right.typ} to {left.typ}")
if left.value == "multi":
# Cannot do something like [a, b, c] = [1, 2, 3]
FAIL() # pragma: notest
if isinstance(left, SArrayType):
if not isinstance(right, SArrayType):
FAIL() # pragma: notest
if left.typ.count != right.typ.count:
FAIL() # pragma: notest
# TODO recurse into left, right if literals?
check_assign(dummy_node_for_type(left.typ.subtyp), dummy_node_for_type(right.typ.subtyp))
if isinstance(left, DArrayType):
if not isinstance(right, DArrayType):
FAIL() # pragma: notest
if left.typ.count < right.typ.count:
FAIL() # pragma: notest
# stricter check for zeroing
if right.value == "~empty" and right.typ.count != left.typ.count:
raise TypeCheckFailure(
f"Bad type for clearing bytes: expected {left.typ} but got {right.typ}"
) # pragma: notest
# TODO recurse into left, right if literals?
check_assign(dummy_node_for_type(left.typ.subtyp), dummy_node_for_type(right.typ.subtyp))
def _check_assign_tuple(left, right):
def FAIL(): # pragma: nocover
raise TypeCheckFailure(f"assigning {right.typ} to {left.typ}")
if not isinstance(right.typ, left.typ.__class__):
FAIL() # pragma: notest
if isinstance(left.typ, StructType):
for k in left.typ.members:
if k not in right.typ.members:
FAIL() # pragma: notest
# TODO recurse into left, right if literals?
check_assign(
dummy_node_for_type(left.typ.members[k]),
dummy_node_for_type(right.typ.members[k]),
)
for k in right.typ.members:
if k not in left.typ.members:
FAIL() # pragma: notest
if left.typ.name != right.typ.name:
FAIL() # pragma: notest
else:
if len(left.typ.members) != len(right.typ.members):
FAIL() # pragma: notest
for (l, r) in zip(left.typ.members, right.typ.members):
# TODO recurse into left, right if literals?
check_assign(dummy_node_for_type(l), dummy_node_for_type(r))
# sanity check an assignment
# typechecking source code is done at an earlier phase
# this function is more of a sanity check for typechecking internally
# generated assignments
def check_assign(left, right):
def FAIL(): # pragma: nocover
raise TypeCheckFailure(f"assigning {right.typ} to {left.typ} {left} {right}")
if isinstance(left.typ, ByteArrayLike):
_check_assign_bytes(left, right)
elif isinstance(left.typ, ArrayLike):
_check_assign_list(left, right)
elif isinstance(left.typ, TupleLike):
_check_assign_tuple(left, right)
elif isinstance(left.typ, BaseType):
# TODO once we propagate types from typechecker, introduce this check:
# if left.typ != right.typ:
# FAIL() # pragma: notest
pass
else: # pragma: nocover
FAIL()
_label = 0
# TODO might want to coalesce with Context.fresh_varname and compile_ir.mksymbol
def _freshname(name):
global _label
_label += 1
return f"{name}{_label}"
# returns True if t is ABI encoded and is a type that needs any kind of
# validation
def needs_clamp(t, encoding):
if encoding not in (Encoding.ABI, Encoding.JSON_ABI):
return False
if isinstance(t, (ByteArrayLike, DArrayType)):
if encoding == Encoding.JSON_ABI:
# don't have bytestring size bound from json, don't clamp
return False
return True
if isinstance(t, BaseType) and t.typ not in ("int256", "uint256", "bytes32"):
return True
if isinstance(t, SArrayType):
return needs_clamp(t.subtype, encoding)
if isinstance(t, TupleLike):
return any(needs_clamp(m, encoding) for m in t.tuple_members())
return False
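# Editor's summary (illustration, not part of the source): which ABI-encoded
# values the pre-patch logic above validates:
#   uint256 / int256 / bytes32        -> no clamp (every 32-byte word is valid)
#   uint8, int128, address, bool, ... -> clamp to the type's range
#   Bytes[N] / String[N] / DynArray   -> clamp the runtime length, except under
#                                        Encoding.JSON_ABI, where the length
#                                        check is skipped (the gap that the
#                                        PYSEC-2022-198 patch above removes)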
# Create an x=y statement, where the types may be compound
def make_setter(left, right):
check_assign(left, right)
# Basic types
if isinstance(left.typ, BaseType):
enc = right.encoding # unwrap_location butchers encoding
right = unwrap_location(right)
# TODO rethink/streamline the clamp_basetype logic
if needs_clamp(right.typ, enc):
right = clamp_basetype(right)
return STORE(left, right)
# Byte arrays
elif isinstance(left.typ, ByteArrayLike):
# TODO rethink/streamline the clamp_basetype logic
if needs_clamp(right.typ, right.encoding):
with right.cache_when_complex("bs_ptr") as (b, right):
copier = make_byte_array_copier(left, right)
ret = b.resolve(["seq", clamp_bytestring(right), copier])
else:
ret = make_byte_array_copier(left, right)
return IRnode.from_list(ret)
elif isinstance(left.typ, DArrayType):
# TODO should we enable this?
# implicit conversion from sarray to darray
# if isinstance(right.typ, SArrayType):
# return _complex_make_setter(left, right)
# TODO rethink/streamline the clamp_basetype logic
if needs_clamp(right.typ, right.encoding):
with right.cache_when_complex("arr_ptr") as (b, right):
copier = _dynarray_make_setter(left, right)
ret = b.resolve(["seq", clamp_dyn_array(right), copier])
else:
ret = _dynarray_make_setter(left, right)
return IRnode.from_list(ret)
# Arrays
elif isinstance(left.typ, (SArrayType, TupleLike)):
return _complex_make_setter(left, right)
def _complex_make_setter(left, right):
if right.value == "~empty" and left.location == MEMORY:
# optimized memzero
return mzero(left, left.typ.memory_bytes_required)
ret = ["seq"]
if isinstance(left.typ, SArrayType):
n_items = right.typ.count
keys = [IRnode.from_list(i, typ="uint256") for i in range(n_items)]
if isinstance(left.typ, TupleLike):
keys = left.typ.tuple_keys()
# if len(keyz) == 0:
# return IRnode.from_list(["pass"])
# general case
# TODO use copy_bytes when the generated code is above a certain size
with left.cache_when_complex("_L") as (b1, left), right.cache_when_complex("_R") as (b2, right):
for k in keys:
l_i = get_element_ptr(left, k, array_bounds_check=False)
r_i = get_element_ptr(right, k, array_bounds_check=False)
ret.append(make_setter(l_i, r_i))
return b1.resolve(b2.resolve(IRnode.from_list(ret)))
def ensure_in_memory(ir_var, context):
"""Ensure a variable is in memory. This is useful for functions
which expect to operate on memory variables.
"""
if ir_var.location == MEMORY:
return ir_var
typ = ir_var.typ
buf = IRnode.from_list(context.new_internal_variable(typ), typ=typ, location=MEMORY)
do_copy = make_setter(buf, ir_var)
return IRnode.from_list(["seq", do_copy, buf], typ=typ, location=MEMORY)
def eval_seq(ir_node):
"""Tries to find the "return" value of a `seq` statement, in order so
that the value can be known without possibly evaluating side effects
"""
if ir_node.value in ("seq", "with") and len(ir_node.args) > 0:
return eval_seq(ir_node.args[-1])
if isinstance(ir_node.value, int):
return IRnode.from_list(ir_node)
return None
# TODO move return checks to vyper/semantics/validation
def is_return_from_function(node):
if isinstance(node, vy_ast.Expr) and node.get("value.func.id") == "selfdestruct":
return True
if isinstance(node, vy_ast.Return):
return True
elif isinstance(node, vy_ast.Raise):
return True
else:
return False
def check_single_exit(fn_node):
_check_return_body(fn_node, fn_node.body)
for node in fn_node.get_descendants(vy_ast.If):
_check_return_body(node, node.body)
if node.orelse:
_check_return_body(node, node.orelse)
def _check_return_body(node, node_list):
return_count = len([n for n in node_list if is_return_from_function(n)])
if return_count > 1:
raise StructureException(
"Too many exit statements (return, raise or selfdestruct).", node
)
# Check for invalid code after returns.
last_node_pos = len(node_list) - 1
for idx, n in enumerate(node_list):
if is_return_from_function(n) and idx < last_node_pos:
# is not last statement in body.
raise StructureException(
"Exit statement with succeeding code (that will not execute).", node_list[idx + 1]
)
def mzero(dst, nbytes):
# calldatacopy from past-the-end gives zero bytes.
# cf. YP H.2 (ops section) with CALLDATACOPY spec.
return IRnode.from_list(
# calldatacopy mempos calldatapos len
["calldatacopy", dst, "calldatasize", nbytes],
annotation="mzero",
)
# zero pad a bytearray according to the ABI spec. The last word
# of the byte array needs to be right-padded with zeroes.
def zero_pad(bytez_placeholder):
len_ = ["mload", bytez_placeholder]
dst = ["add", ["add", bytez_placeholder, 32], "len"]
# the runtime length of the data rounded up to nearest 32
# from spec:
# the actual value of X as a byte sequence,
# followed by the *minimum* number of zero-bytes
# such that len(enc(X)) is a multiple of 32.
num_zero_bytes = ["sub", ["ceil32", "len"], "len"]
return IRnode.from_list(
["with", "len", len_, ["with", "dst", dst, mzero("dst", num_zero_bytes)]],
annotation="Zero pad",
)
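# Editor's sketch: the ABI padding rule quoted above, in plain Python; the pad
# is whatever brings the byte length up to the next multiple of 32.
def _num_zero_pad_bytes(length: int) -> int:
    ceil32 = (length + 31) // 32 * 32
    return ceil32 - length

assert _num_zero_pad_bytes(5) == 27
assert _num_zero_pad_bytes(32) == 0
assert _num_zero_pad_bytes(33) == 31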
# convenience rewrites for shr/sar/shl
def shr(bits, x):
if version_check(begin="constantinople"):
return ["shr", bits, x]
return ["div", x, ["exp", 2, bits]]
# convenience rewrites for shr/sar/shl
def shl(bits, x):
if version_check(begin="constantinople"):
return ["shl", bits, x]
return ["mul", x, ["exp", 2, bits]]
def sar(bits, x):
if version_check(begin="constantinople"):
return ["sar", bits, x]
# emulate for older arches. keep in mind note from EIP 145:
# "This is not equivalent to PUSH1 2 EXP SDIV, since it rounds
# differently. See SDIV(-1, 2) == 0, while SAR(-1, 1) == -1."
return ["sdiv", ["add", ["slt", x, 0], x], ["exp", 2, bits]]
def clamp_bytestring(ir_node):
t = ir_node.typ
if not isinstance(t, ByteArrayLike):
raise CompilerPanic(f"{t} passed to clamp_bytestring") # pragma: notest
return ["assert", ["le", get_bytearray_length(ir_node), t.maxlen]]
def clamp_dyn_array(ir_node):
t = ir_node.typ
assert isinstance(t, DArrayType)
return ["assert", ["le", get_dyn_array_count(ir_node), t.count]]
# clampers for basetype
def clamp_basetype(ir_node):
t = ir_node.typ
if not isinstance(t, BaseType):
raise CompilerPanic(f"{t} passed to clamp_basetype") # pragma: notest
# copy of the input
ir_node = unwrap_location(ir_node)
if is_integer_type(t) or is_decimal_type(t):
if t._num_info.bits == 256:
return ir_node
else:
return int_clamp(ir_node, t._num_info.bits, signed=t._num_info.is_signed)
if is_bytes_m_type(t):
if t._bytes_info.m == 32:
return ir_node # special case, no clamp.
else:
return bytes_clamp(ir_node, t._bytes_info.m)
if t.typ in ("address",):
return int_clamp(ir_node, 160)
if t.typ in ("bool",):
return int_clamp(ir_node, 1)
raise CompilerPanic(f"{t} passed to clamp_basetype") # pragma: notest
def int_clamp(ir_node, bits, signed=False):
"""Generalized clamper for integer types. Takes the number of bits,
whether it's signed, and returns an IR node which checks it is
in bounds. (Consumers should use clamp_basetype instead which uses
type-based dispatch and is a little safer.)
"""
if bits >= 256:
raise CompilerPanic(f"invalid clamp: {bits}>=256 ({ir_node})") # pragma: notest
with ir_node.cache_when_complex("val") as (b, val):
if signed:
# example for bits==128:
# promote_signed_int(val, bits) is the "canonical" version of val
# if val is in bounds, the bits above bit 128 should be equal.
# (this works for both val >= 0 and val < 0. in the first case,
# all upper bits should be 0 if val is a valid int128,
# in the latter case, all upper bits should be 1.)
assertion = ["assert", ["eq", val, promote_signed_int(val, bits)]]
else:
assertion = ["assert", ["iszero", shr(bits, val)]]
ret = b.resolve(["seq", assertion, val])
# TODO fix this annotation
return IRnode.from_list(ret, annotation=f"int_clamp {ir_node.typ}")
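# Editor's sketch (assumption): the signed-bounds idea described above, in
# plain Python over a raw 256-bit word.
def _fits_signed_sketch(word: int, bits: int) -> bool:
    low = word % (1 << bits)
    canonical = low - (1 << bits) if low >> (bits - 1) else low  # sign-extend the low bits
    return canonical % (1 << 256) == word % (1 << 256)

assert _fits_signed_sketch((-1) % 2**256, 128)   # -1 is a valid int128 (upper bits all 1)
assert not _fits_signed_sketch(1 << 200, 128)    # upper bits disagree, so out of range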
def bytes_clamp(ir_node: IRnode, n_bytes: int) -> IRnode:
if not (0 < n_bytes <= 32):
raise CompilerPanic(f"bad type: bytes{n_bytes}")
with ir_node.cache_when_complex("val") as (b, val):
assertion = ["assert", ["iszero", shl(n_bytes * 8, val)]]
ret = b.resolve(["seq", assertion, val])
return IRnode.from_list(ret, annotation=f"bytes{n_bytes}_clamp")
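# Editor's illustration: bytesN values sit left-aligned in the 32-byte word,
# so shifting left by N*8 bits (mod 2**256) must leave zero, as asserted above.
def _bytes_n_ok_sketch(word: int, n_bytes: int) -> bool:
    return (word << (n_bytes * 8)) % (1 << 256) == 0

assert _bytes_n_ok_sketch(int.from_bytes(b"ab" + b"\x00" * 30, "big"), 2)
assert not _bytes_n_ok_sketch(int.from_bytes(b"abc" + b"\x00" * 29, "big"), 2)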
# e.g. for int8, promote 255 to -1
def promote_signed_int(x, bits):
assert bits % 8 == 0
ret = ["signextend", bits // 8 - 1, x]
return IRnode.from_list(ret, annotation=f"promote int{bits}")
|
PYSEC-2022-198
|
vyper/codegen/external_call.py
|
@@ -6,10 +6,12 @@
check_assign,
check_external_call,
dummy_node_for_type,
- get_element_ptr,
+ make_setter,
+ needs_clamp,
)
from vyper.codegen.ir_node import Encoding, IRnode
from vyper.codegen.types import InterfaceType, TupleType, get_type_for_exact_size
+from vyper.codegen.types.convert import new_type_to_old_type
from vyper.exceptions import StateAccessViolation, TypeCheckFailure
@@ -59,22 +61,19 @@ def _pack_arguments(contract_sig, args, context):
return buf, mstore_method_id + [encode_args], args_ofst, args_len
-def _returndata_encoding(contract_sig):
- if contract_sig.is_from_json:
- return Encoding.JSON_ABI
- return Encoding.ABI
+def _unpack_returndata(buf, contract_sig, skip_contract_check, context, expr):
+ # expr.func._metadata["type"].return_type is more accurate
+ # than contract_sig.return_type in the case of JSON interfaces.
+ ast_return_t = expr.func._metadata["type"].return_type
-
-def _unpack_returndata(buf, contract_sig, skip_contract_check, context):
- return_t = contract_sig.return_type
- if return_t is None:
+ if ast_return_t is None:
return ["pass"], 0, 0
+ # sanity check
+ return_t = new_type_to_old_type(ast_return_t)
+ check_assign(dummy_node_for_type(return_t), dummy_node_for_type(contract_sig.return_type))
+
return_t = calculate_type_for_external_return(return_t)
- # if the abi signature has a different type than
- # the vyper type, we need to wrap and unwrap the type
- # so that the ABI decoding works correctly
- should_unwrap_abi_tuple = return_t != contract_sig.return_type
abi_return_t = return_t.abi_type
@@ -88,25 +87,30 @@ def _unpack_returndata(buf, contract_sig, skip_contract_check, context):
# revert when returndatasize is not in bounds
ret = []
# runtime: min_return_size <= returndatasize
- # TODO move the -1 optimization to IR optimizer
if not skip_contract_check:
- ret += [["assert", ["gt", "returndatasize", min_return_size - 1]]]
+ ret += [["assert", ["ge", "returndatasize", min_return_size]]]
- # add as the last IRnode a pointer to the return data structure
+ encoding = Encoding.ABI
- # the return type has been wrapped by the calling contract;
- # unwrap it so downstream code isn't confused.
- # basically this expands to buf+32 if the return type has been wrapped
- # in a tuple AND its ABI type is dynamic.
- # in most cases, this simply will evaluate to ret.
- # in the special case where the return type has been wrapped
- # in a tuple AND its ABI type is dynamic, it expands to buf+32.
- buf = IRnode(buf, typ=return_t, encoding=_returndata_encoding(contract_sig), location=MEMORY)
+ buf = IRnode.from_list(
+ buf,
+ typ=return_t,
+ location=MEMORY,
+ encoding=encoding,
+ annotation=f"{expr.node_source_code} returndata buffer",
+ )
- if should_unwrap_abi_tuple:
- buf = get_element_ptr(buf, 0, array_bounds_check=False)
+ assert isinstance(return_t, TupleType)
+ # unpack strictly
+ if needs_clamp(return_t, encoding):
+ buf2 = IRnode.from_list(
+ context.new_internal_variable(return_t), typ=return_t, location=MEMORY
+ )
- ret += [buf]
+ ret.append(make_setter(buf2, buf))
+ ret.append(buf2)
+ else:
+ ret.append(buf)
return ret, ret_ofst, ret_len
@@ -145,7 +149,7 @@ def _external_call_helper(
buf, arg_packer, args_ofst, args_len = _pack_arguments(contract_sig, args_ir, context)
ret_unpacker, ret_ofst, ret_len = _unpack_returndata(
- buf, contract_sig, skip_contract_check, context
+ buf, contract_sig, skip_contract_check, context, expr
)
sub += arg_packer
@@ -169,15 +173,7 @@ def _external_call_helper(
if contract_sig.return_type is not None:
sub += ret_unpacker
- ret = IRnode.from_list(
- sub,
- typ=contract_sig.return_type,
- location=MEMORY,
- # set the encoding to ABI here, downstream code will decode and add clampers.
- encoding=_returndata_encoding(contract_sig),
- )
-
- return ret
+ return IRnode.from_list(sub, typ=contract_sig.return_type, location=MEMORY)
def _get_special_kwargs(stmt_expr, context):
|
import vyper.utils as util
from vyper.address_space import MEMORY
from vyper.codegen.abi_encoder import abi_encode
from vyper.codegen.core import (
calculate_type_for_external_return,
check_assign,
check_external_call,
dummy_node_for_type,
get_element_ptr,
)
from vyper.codegen.ir_node import Encoding, IRnode
from vyper.codegen.types import InterfaceType, TupleType, get_type_for_exact_size
from vyper.exceptions import StateAccessViolation, TypeCheckFailure
def _pack_arguments(contract_sig, args, context):
# abi encoding just treats all args as a big tuple
args_tuple_t = TupleType([x.typ for x in args])
args_as_tuple = IRnode.from_list(["multi"] + [x for x in args], typ=args_tuple_t)
args_abi_t = args_tuple_t.abi_type
# sanity typecheck - make sure the arguments can be assigned
dst_tuple_t = TupleType([arg.typ for arg in contract_sig.args][: len(args)])
check_assign(dummy_node_for_type(dst_tuple_t), args_as_tuple)
if contract_sig.return_type is not None:
return_abi_t = calculate_type_for_external_return(contract_sig.return_type).abi_type
# we use the same buffer for args and returndata,
# so allocate enough space here for the returndata too.
buflen = max(args_abi_t.size_bound(), return_abi_t.size_bound())
else:
buflen = args_abi_t.size_bound()
buflen += 32 # padding for the method id
buf_t = get_type_for_exact_size(buflen)
buf = context.new_internal_variable(buf_t)
args_ofst = buf + 28
args_len = args_abi_t.size_bound() + 4
abi_signature = contract_sig.name + dst_tuple_t.abi_type.selector_name()
# layout:
# 32 bytes | args
# 0x..00<method_id_4bytes> | args
# the reason for the left padding is just so the alignment is easier.
# if we were only targeting constantinople, we could align
# to buf (and also keep code size small) by using
# (mstore buf (shl signature.method_id 224))
mstore_method_id = [["mstore", buf, util.abi_method_id(abi_signature)]]
if len(args) == 0:
encode_args = ["pass"]
else:
encode_args = abi_encode(buf + 32, args_as_tuple, context, bufsz=buflen)
return buf, mstore_method_id + [encode_args], args_ofst, args_len
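# Editor's illustration (assumption): the "left padded" layout described above,
# using the well-known transfer(address,uint256) selector 0xa9059cbb as sample
# data; the 4 selector bytes end exactly at offset 32, so the call's argument
# window can start at buf + 28.
buf_sketch = bytearray(36)
buf_sketch[0:32] = (0xA9059CBB).to_bytes(32, "big")   # 28 zero bytes, then the id
assert buf_sketch[28:32] == bytes.fromhex("a9059cbb")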
def _returndata_encoding(contract_sig):
if contract_sig.is_from_json:
return Encoding.JSON_ABI
return Encoding.ABI
def _unpack_returndata(buf, contract_sig, skip_contract_check, context):
return_t = contract_sig.return_type
if return_t is None:
return ["pass"], 0, 0
return_t = calculate_type_for_external_return(return_t)
# if the abi signature has a different type than
# the vyper type, we need to wrap and unwrap the type
# so that the ABI decoding works correctly
should_unwrap_abi_tuple = return_t != contract_sig.return_type
abi_return_t = return_t.abi_type
min_return_size = abi_return_t.min_size()
max_return_size = abi_return_t.size_bound()
assert 0 < min_return_size <= max_return_size
ret_ofst = buf
ret_len = max_return_size
# revert when returndatasize is not in bounds
ret = []
# runtime: min_return_size <= returndatasize
# TODO move the -1 optimization to IR optimizer
if not skip_contract_check:
ret += [["assert", ["gt", "returndatasize", min_return_size - 1]]]
# add as the last IRnode a pointer to the return data structure
# the return type has been wrapped by the calling contract;
# unwrap it so downstream code isn't confused.
# basically this expands to buf+32 if the return type has been wrapped
# in a tuple AND its ABI type is dynamic.
# in most cases, this simply will evaluate to ret.
# in the special case where the return type has been wrapped
# in a tuple AND its ABI type is dynamic, it expands to buf+32.
buf = IRnode(buf, typ=return_t, encoding=_returndata_encoding(contract_sig), location=MEMORY)
if should_unwrap_abi_tuple:
buf = get_element_ptr(buf, 0, array_bounds_check=False)
ret += [buf]
return ret, ret_ofst, ret_len
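# Editor's note (illustration of the unwrap comment above): for a single
# dynamic return value such as Bytes[..], the one-element tuple encoding is
# <head word: offset 0x20><tail: the actual data>, so "unwrapping" the tuple
# amounts to pointing the result at buf + 32.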
def _external_call_helper(
contract_address,
contract_sig,
args_ir,
context,
value=None,
gas=None,
skip_contract_check=None,
expr=None,
):
if value is None:
value = 0
if gas is None:
gas = "gas"
if skip_contract_check is None:
skip_contract_check = False
# sanity check
assert len(contract_sig.base_args) <= len(args_ir) <= len(contract_sig.args)
if context.is_constant() and contract_sig.mutability not in ("view", "pure"):
# TODO is this already done in type checker?
raise StateAccessViolation(
f"May not call state modifying function '{contract_sig.name}' "
f"within {context.pp_constancy()}.",
expr,
)
sub = ["seq"]
buf, arg_packer, args_ofst, args_len = _pack_arguments(contract_sig, args_ir, context)
ret_unpacker, ret_ofst, ret_len = _unpack_returndata(
buf, contract_sig, skip_contract_check, context
)
sub += arg_packer
if contract_sig.return_type is None and not skip_contract_check:
# if we do not expect return data, check that a contract exists at the
# target address. we must perform this check BEFORE the call because
# the contract might selfdestruct. on the other hand we can omit this
# when we _do_ expect return data because we later check
# `returndatasize` (that check works even if the contract
# selfdestructs).
sub.append(["assert", ["extcodesize", contract_address]])
if context.is_constant() or contract_sig.mutability in ("view", "pure"):
call_op = ["staticcall", gas, contract_address, args_ofst, args_len, ret_ofst, ret_len]
else:
call_op = ["call", gas, contract_address, value, args_ofst, args_len, ret_ofst, ret_len]
sub.append(check_external_call(call_op))
if contract_sig.return_type is not None:
sub += ret_unpacker
ret = IRnode.from_list(
sub,
typ=contract_sig.return_type,
location=MEMORY,
# set the encoding to ABI here, downstream code will decode and add clampers.
encoding=_returndata_encoding(contract_sig),
)
return ret
def _get_special_kwargs(stmt_expr, context):
from vyper.codegen.expr import Expr # TODO rethink this circular import
value, gas, skip_contract_check = None, None, None
for kw in stmt_expr.keywords:
if kw.arg == "gas":
gas = Expr.parse_value_expr(kw.value, context)
elif kw.arg == "value":
value = Expr.parse_value_expr(kw.value, context)
elif kw.arg == "skip_contract_check":
skip_contract_check = kw.value.value
assert isinstance(skip_contract_check, bool), "type checker missed this"
else:
raise TypeCheckFailure("Unexpected keyword argument")
# TODO maybe return a small dataclass to reduce verbosity
return value, gas, skip_contract_check
def ir_for_external_call(stmt_expr, context):
from vyper.codegen.expr import Expr # TODO rethink this circular import
contract_address = Expr.parse_value_expr(stmt_expr.func.value, context)
value, gas, skip_contract_check = _get_special_kwargs(stmt_expr, context)
args_ir = [Expr(x, context).ir_node for x in stmt_expr.args]
assert isinstance(contract_address.typ, InterfaceType)
contract_name = contract_address.typ.name
method_name = stmt_expr.func.attr
contract_sig = context.sigs[contract_name][method_name]
ret = _external_call_helper(
contract_address,
contract_sig,
args_ir,
context,
value=value,
gas=gas,
skip_contract_check=skip_contract_check,
expr=stmt_expr,
)
ret.annotation = stmt_expr.get("node_source_code")
return ret
|
PYSEC-2022-198
|
vyper/codegen/function_definitions/external_function.py
|
@@ -3,36 +3,14 @@
import vyper.utils as util
from vyper.address_space import CALLDATA, DATA, MEMORY
from vyper.ast.signatures.function_signature import FunctionSignature, VariableRecord
+from vyper.codegen.abi_encoder import abi_encoding_matches_vyper
from vyper.codegen.context import Context
-from vyper.codegen.core import get_element_ptr, getpos, make_setter
+from vyper.codegen.core import get_element_ptr, getpos, make_setter, needs_clamp
from vyper.codegen.expr import Expr
from vyper.codegen.function_definitions.utils import get_nonreentrant_lock
from vyper.codegen.ir_node import Encoding, IRnode
from vyper.codegen.stmt import parse_body
-from vyper.codegen.types.types import (
- BaseType,
- ByteArrayLike,
- DArrayType,
- SArrayType,
- TupleLike,
- TupleType,
-)
-from vyper.exceptions import CompilerPanic
-
-
-def _should_decode(typ):
- # either a basetype which needs to be clamped
- # or a complex type which contains something that
- # needs to be clamped.
- if isinstance(typ, BaseType):
- return typ.typ not in ("int256", "uint256", "bytes32")
- if isinstance(typ, (ByteArrayLike, DArrayType)):
- return True
- if isinstance(typ, SArrayType):
- return _should_decode(typ.subtype)
- if isinstance(typ, TupleLike):
- return any(_should_decode(t) for t in typ.tuple_members())
- raise CompilerPanic(f"_should_decode({typ})") # pragma: notest
+from vyper.codegen.types.types import TupleType
# register function args with the local calling context.
@@ -53,7 +31,7 @@ def _register_function_args(context: Context, sig: FunctionSignature) -> List[IR
arg_ir = get_element_ptr(base_args_ofst, i)
- if _should_decode(arg.typ):
+ if needs_clamp(arg.typ, Encoding.ABI):
# allocate a memory slot for it and copy
p = context.new_variable(arg.name, arg.typ, is_mutable=False)
dst = IRnode(p, typ=arg.typ, location=MEMORY)
@@ -62,6 +40,7 @@ def _register_function_args(context: Context, sig: FunctionSignature) -> List[IR
copy_arg.source_pos = getpos(arg.ast_source)
ret.append(copy_arg)
else:
+ assert abi_encoding_matches_vyper(arg.typ)
# leave it in place
context.vars[arg.name] = VariableRecord(
name=arg.name,
|
from typing import Any, List
import vyper.utils as util
from vyper.address_space import CALLDATA, DATA, MEMORY
from vyper.ast.signatures.function_signature import FunctionSignature, VariableRecord
from vyper.codegen.context import Context
from vyper.codegen.core import get_element_ptr, getpos, make_setter
from vyper.codegen.expr import Expr
from vyper.codegen.function_definitions.utils import get_nonreentrant_lock
from vyper.codegen.ir_node import Encoding, IRnode
from vyper.codegen.stmt import parse_body
from vyper.codegen.types.types import (
BaseType,
ByteArrayLike,
DArrayType,
SArrayType,
TupleLike,
TupleType,
)
from vyper.exceptions import CompilerPanic
def _should_decode(typ):
# either a basetype which needs to be clamped
# or a complex type which contains something that
# needs to be clamped.
if isinstance(typ, BaseType):
return typ.typ not in ("int256", "uint256", "bytes32")
if isinstance(typ, (ByteArrayLike, DArrayType)):
return True
if isinstance(typ, SArrayType):
return _should_decode(typ.subtype)
if isinstance(typ, TupleLike):
return any(_should_decode(t) for t in typ.tuple_members())
raise CompilerPanic(f"_should_decode({typ})") # pragma: notest
# register function args with the local calling context.
# also allocate the ones that live in memory (i.e. kwargs)
def _register_function_args(context: Context, sig: FunctionSignature) -> List[IRnode]:
ret = []
# the type of the calldata
base_args_t = TupleType([arg.typ for arg in sig.base_args])
# tuple with the abi_encoded args
if sig.is_init_func:
base_args_ofst = IRnode(0, location=DATA, typ=base_args_t, encoding=Encoding.ABI)
else:
base_args_ofst = IRnode(4, location=CALLDATA, typ=base_args_t, encoding=Encoding.ABI)
for i, arg in enumerate(sig.base_args):
arg_ir = get_element_ptr(base_args_ofst, i)
if _should_decode(arg.typ):
# allocate a memory slot for it and copy
p = context.new_variable(arg.name, arg.typ, is_mutable=False)
dst = IRnode(p, typ=arg.typ, location=MEMORY)
copy_arg = make_setter(dst, arg_ir)
copy_arg.source_pos = getpos(arg.ast_source)
ret.append(copy_arg)
else:
# leave it in place
context.vars[arg.name] = VariableRecord(
name=arg.name,
pos=arg_ir,
typ=arg.typ,
mutable=False,
location=arg_ir.location,
encoding=Encoding.ABI,
)
return ret
def _annotated_method_id(abi_sig):
method_id = util.abi_method_id(abi_sig)
annotation = f"{hex(method_id)}: {abi_sig}"
return IRnode(method_id, annotation=annotation)
def _generate_kwarg_handlers(context: Context, sig: FunctionSignature) -> List[Any]:
# generate kwarg handlers.
# since they might come in thru calldata or be default,
# allocate them in memory and then fill it in based on calldata or default,
# depending on the signature
# a kwarg handler looks like
# (if (eq _method_id <method_id>)
# copy calldata args to memory
# write default args to memory
# goto external_function_common_ir
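# (illustrative example, not in the original: for a function with one default
# argument, e.g. `def foo(a: uint256, b: uint256 = 0)`, two handlers are
# generated -- one matching the 1-positional-arg method_id, which writes the
# default value for `b` into memory, and one matching the 2-arg method_id,
# which copies `b` from calldata)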
def handler_for(calldata_kwargs, default_kwargs):
calldata_args = sig.base_args + calldata_kwargs
# create a fake type so that get_element_ptr works
calldata_args_t = TupleType(list(arg.typ for arg in calldata_args))
abi_sig = sig.abi_signature_for_kwargs(calldata_kwargs)
method_id = _annotated_method_id(abi_sig)
calldata_kwargs_ofst = IRnode(
4, location=CALLDATA, typ=calldata_args_t, encoding=Encoding.ABI
)
# a sequence of statements to strictify kwargs into memory
ret = ["seq"]
# TODO optimize make_setter by using
# TupleType(list(arg.typ for arg in calldata_kwargs + default_kwargs))
# (must ensure memory area is contiguous)
n_base_args = len(sig.base_args)
for i, arg_meta in enumerate(calldata_kwargs):
k = n_base_args + i
dst = context.lookup_var(arg_meta.name).pos
lhs = IRnode(dst, location=MEMORY, typ=arg_meta.typ)
rhs = get_element_ptr(calldata_kwargs_ofst, k, array_bounds_check=False)
copy_arg = make_setter(lhs, rhs)
copy_arg.source_pos = getpos(arg_meta.ast_source)
ret.append(copy_arg)
for x in default_kwargs:
dst = context.lookup_var(x.name).pos
lhs = IRnode(dst, location=MEMORY, typ=x.typ)
lhs.source_pos = getpos(x.ast_source)
kw_ast_val = sig.default_values[x.name] # e.g. `3` in x: int = 3
rhs = Expr(kw_ast_val, context).ir_node
copy_arg = make_setter(lhs, rhs)
copy_arg.source_pos = getpos(x.ast_source)
ret.append(copy_arg)
ret.append(["goto", sig.external_function_base_entry_label])
ret = ["if", ["eq", "_calldata_method_id", method_id], ret]
return ret
ret = ["seq"]
keyword_args = sig.default_args
# allocate variable slots in memory
for arg in keyword_args:
context.new_variable(arg.name, arg.typ, is_mutable=False)
for i, _ in enumerate(keyword_args):
calldata_kwargs = keyword_args[:i]
default_kwargs = keyword_args[i:]
ret.append(handler_for(calldata_kwargs, default_kwargs))
ret.append(handler_for(keyword_args, []))
return ret
# TODO it would be nice if this returned a data structure which were
# amenable to generating a jump table instead of the linear search for
# method_id we have now.
def generate_ir_for_external_function(code, sig, context, check_nonpayable):
# TODO type hints:
# def generate_ir_for_external_function(
# code: vy_ast.FunctionDef, sig: FunctionSignature, context: Context, check_nonpayable: bool,
# ) -> IRnode:
"""Return the IR for an external function. Includes code to inspect the method_id,
enter the function (nonpayable and reentrancy checks), handle kwargs and exit
the function (clean up reentrancy storage variables)
"""
func_type = code._metadata["type"]
nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(func_type)
# generate handlers for base args and register the variable records
handle_base_args = _register_function_args(context, sig)
# generate handlers for kwargs and register the variable records
kwarg_handlers = _generate_kwarg_handlers(context, sig)
body = ["seq"]
# once optional args have been handled,
# generate the main body of the function
body += handle_base_args
if check_nonpayable and sig.mutability != "payable":
# if the contract contains payable functions, but this is not one of them
# add an assertion that the value of the call is zero
body += [["assert", ["iszero", "callvalue"]]]
body += nonreentrant_pre
body += [parse_body(code.body, context, ensure_terminated=True)]
# wrap the body in labeled block
body = ["label", sig.external_function_base_entry_label, ["var_list"], body]
exit_sequence = ["seq"] + nonreentrant_post
if sig.is_init_func:
pass # init func has special exit sequence generated by module.py
elif context.return_type is None:
exit_sequence += [["stop"]]
else:
exit_sequence += [["return", "ret_ofst", "ret_len"]]
exit_sequence_args = ["var_list"]
if context.return_type is not None:
exit_sequence_args += ["ret_ofst", "ret_len"]
# wrap the exit in a labeled block
exit = ["label", sig.exit_sequence_label, exit_sequence_args, exit_sequence]
# the ir which comprises the main body of the function,
# besides any kwarg handling
func_common_ir = ["seq", body, exit]
if sig.is_default_func or sig.is_init_func:
ret = ["seq"]
# add a goto to make the function entry look like other functions
# (for zksync interpreter)
ret.append(["goto", sig.external_function_base_entry_label])
ret.append(func_common_ir)
else:
ret = kwarg_handlers
# sneak the base code into the kwarg handler
# TODO rethink this / make it clearer
ret[-1][-1].append(func_common_ir)
return IRnode.from_list(ret)
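# Editor's note (illustrative sketch, not part of the original file): the patch
# shown above replaces the local _should_decode() heuristic with the shared
# needs_clamp() / abi_encoding_matches_vyper() helpers it imports. Assuming a
# post-patch vyper checkout, the dichotomy enforced by the new assert can be
# spot-checked roughly as below (helper names and signatures are taken from the
# diff; the example types are arbitrary).
def _sketch_clamp_dichotomy():
    from vyper.codegen.abi_encoder import abi_encoding_matches_vyper
    from vyper.codegen.core import needs_clamp
    from vyper.codegen.types import BaseType, ByteArrayType

    for t in (BaseType("uint256"), BaseType("int128"), ByteArrayType(32)):
        # every ABI-encoded argument is either clamped on entry into the
        # function, or its ABI encoding already matches vyper's representation
        assert needs_clamp(t, Encoding.ABI) or abi_encoding_matches_vyper(t)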
|
PYSEC-2022-198
|
vyper/codegen/ir_node.py
|
@@ -47,8 +47,6 @@ class Encoding(Enum):
VYPER = auto()
# abi encoded, default for args/return values from external funcs
ABI = auto()
- # abi encoded, same as ABI but no clamps for bytestrings
- JSON_ABI = auto()
# future: packed
|
import re
from enum import Enum, auto
from typing import Any, List, Optional, Tuple, Union
from vyper.address_space import AddrSpace
from vyper.codegen.types import BaseType, NodeType, ceil32
from vyper.compiler.settings import VYPER_COLOR_OUTPUT
from vyper.evm.opcodes import get_ir_opcodes
from vyper.exceptions import CodegenPanic, CompilerPanic
from vyper.utils import VALID_IR_MACROS, cached_property
# Set default string representation for ints in IR output.
AS_HEX_DEFAULT = False
if VYPER_COLOR_OUTPUT:
OKBLUE = "\033[94m"
OKMAGENTA = "\033[35m"
OKLIGHTMAGENTA = "\033[95m"
OKLIGHTBLUE = "\033[94m"
ENDC = "\033[0m"
else:
OKBLUE = ""
OKMAGENTA = ""
OKLIGHTMAGENTA = ""
OKLIGHTBLUE = ""
ENDC = ""
class NullAttractor(int):
def __add__(self, other: int) -> "NullAttractor":
return NullAttractor()
def __repr__(self) -> str:
return "None"
__radd__ = __add__
__mul__ = __add__
def push_label_to_stack(labelname: str) -> str:
# items prefixed with `_sym_` are ignored until asm phase
return "_sym_" + labelname
class Encoding(Enum):
# vyper encoding, default for memory variables
VYPER = auto()
# abi encoded, default for args/return values from external funcs
ABI = auto()
# abi encoded, same as ABI but no clamps for bytestrings
JSON_ABI = auto()
# future: packed
# Data structure for IR parse tree
class IRnode:
repr_show_gas = False
gas: int
valency: int
args: List["IRnode"]
value: Union[str, int]
def __init__(
self,
value: Union[str, int],
args: List["IRnode"] = None,
typ: NodeType = None,
location: Optional[AddrSpace] = None,
source_pos: Optional[Tuple[int, int]] = None,
annotation: Optional[str] = None,
mutable: bool = True,
add_gas_estimate: int = 0,
valency: Optional[int] = None,
encoding: Encoding = Encoding.VYPER,
):
if args is None:
args = []
self.value = value
self.args = args
# TODO remove this sanity check once mypy is more thorough
assert isinstance(typ, NodeType) or typ is None, repr(typ)
self.typ = typ
self.location = location
self.source_pos = source_pos
self.annotation = annotation
self.mutable = mutable
self.add_gas_estimate = add_gas_estimate
self.encoding = encoding
self.as_hex = AS_HEX_DEFAULT
# Optional annotation properties for gas estimation
self.total_gas = None
self.func_name = None
def _check(condition, err):
if not condition:
raise CompilerPanic(str(err))
_check(self.value is not None, "None is not allowed as IRnode value")
# Determine this node's valency (1 if it pushes a value on the stack,
# 0 otherwise) and checks to make sure the number and valencies of
# children are correct. Also, find an upper bound on gas consumption
# Numbers
if isinstance(self.value, int):
_check(len(self.args) == 0, "int can't have arguments")
self.valency = 1
self.gas = 5
elif isinstance(self.value, str):
# Opcodes and pseudo-opcodes (e.g. clamp)
if self.value.upper() in get_ir_opcodes():
_, ins, outs, gas = get_ir_opcodes()[self.value.upper()]
self.valency = outs
_check(
len(self.args) == ins,
f"Number of arguments mismatched: {self.value} {self.args}",
)
# We add 2 per stack height at push time and take it back
# at pop time; this makes `break` easier to handle
self.gas = gas + 2 * (outs - ins)
for arg in self.args:
# pop and pass are used to push/pop values on the stack to be
# consumed for internal functions, therefore we whitelist this as a zero valency
# allowed argument.
zero_valency_whitelist = {"pass", "pop"}
_check(
arg.valency == 1 or arg.value in zero_valency_whitelist,
f"invalid argument to `{self.value}`: {arg}",
)
self.gas += arg.gas
# Dynamic gas cost: 8 gas for each byte of logging data
if self.value.upper()[0:3] == "LOG" and isinstance(self.args[1].value, int):
self.gas += self.args[1].value * 8
# Dynamic gas cost: non-zero-valued call
if self.value.upper() == "CALL" and self.args[2].value != 0:
self.gas += 34000
# Dynamic gas cost: filling sstore (ie. not clearing)
elif self.value.upper() == "SSTORE" and self.args[1].value != 0:
self.gas += 15000
# Dynamic gas cost: calldatacopy
elif self.value.upper() in ("CALLDATACOPY", "CODECOPY", "EXTCODECOPY"):
size = 34000
size_arg_index = 3 if self.value.upper() == "EXTCODECOPY" else 2
size_arg = self.args[size_arg_index]
if isinstance(size_arg.value, int):
size = size_arg.value
self.gas += ceil32(size) // 32 * 3
# Gas limits in call
if self.value.upper() == "CALL" and isinstance(self.args[0].value, int):
self.gas += self.args[0].value
# If statements
elif self.value == "if":
if len(self.args) == 3:
self.gas = self.args[0].gas + max(self.args[1].gas, self.args[2].gas) + 3
if len(self.args) == 2:
self.gas = self.args[0].gas + self.args[1].gas + 17
_check(
self.args[0].valency > 0,
f"zerovalent argument as a test to an if statement: {self.args[0]}",
)
_check(len(self.args) in (2, 3), "if statement can only have 2 or 3 arguments")
self.valency = self.args[1].valency
# With statements: with <var> <initial> <statement>
elif self.value == "with":
_check(len(self.args) == 3, self)
_check(
len(self.args[0].args) == 0 and isinstance(self.args[0].value, str),
f"first argument to with statement must be a variable name: {self.args[0]}",
)
_check(
self.args[1].valency == 1 or self.args[1].value == "pass",
f"zerovalent argument to with statement: {self.args[1]}",
)
self.valency = self.args[2].valency
self.gas = sum([arg.gas for arg in self.args]) + 5
# Repeat statements: repeat <index_name> <startval> <rounds> <rounds_bound> <body>
elif self.value == "repeat":
_check(
len(self.args) == 5, "repeat(index_name, startval, rounds, rounds_bound, body)"
)
counter_ptr = self.args[0]
start = self.args[1]
repeat_count = self.args[2]
repeat_bound = self.args[3]
body = self.args[4]
_check(
isinstance(repeat_bound.value, int) and repeat_bound.value > 0,
f"repeat bound must be a compile-time positive integer: {self.args[2]}",
)
_check(repeat_count.valency == 1, repeat_count)
_check(counter_ptr.valency == 1, counter_ptr)
_check(start.valency == 1, start)
self.valency = 0
self.gas = counter_ptr.gas + start.gas
self.gas += 3 # gas for repeat_bound
int_bound = int(repeat_bound.value)
self.gas += int_bound * (body.gas + 50) + 30
if repeat_count != repeat_bound:
# gas for assert(repeat_count <= repeat_bound)
self.gas += 18
# Seq statements: seq <statement> <statement> ...
elif self.value == "seq":
self.valency = self.args[-1].valency if self.args else 0
self.gas = sum([arg.gas for arg in self.args]) + 30
# GOTO is a jump with args
# e.g. (goto my_label x y z) will push x y and z onto the stack,
# then JUMP to my_label.
elif self.value in ("goto", "exit_to"):
for arg in self.args:
_check(
arg.valency == 1 or arg.value == "pass",
f"zerovalent argument to goto {arg}",
)
self.valency = 0
self.gas = sum([arg.gas for arg in self.args])
elif self.value == "label":
if not self.args[1].value == "var_list":
raise CodegenPanic(f"2nd argument to label must be var_list, {self}")
self.valency = 0
self.gas = 1 + sum(t.gas for t in self.args)
# var_list names a variable number of stack variables
elif self.value == "var_list":
for arg in self.args:
if not isinstance(arg.value, str) or len(arg.args) > 0:
raise CodegenPanic(f"var_list only takes strings: {self.args}")
self.valency = 0
self.gas = 0
# Multi statements: multi <expr> <expr> ...
elif self.value == "multi":
for arg in self.args:
_check(
arg.valency > 0, f"Multi expects all children to not be zerovalent: {arg}"
)
self.valency = sum([arg.valency for arg in self.args])
self.gas = sum([arg.gas for arg in self.args])
elif self.value == "deploy":
self.valency = 0
self.gas = NullAttractor() # unknown
# Stack variables
else:
self.valency = 1
self.gas = 3
elif self.value is None:
self.valency = 1
# None IRnodes always get compiled into something else, e.g.
# mzero or PUSH1 0, and the gas will get re-estimated then.
self.gas = 3
else:
raise CompilerPanic(f"Invalid value for IR AST node: {self.value}")
assert isinstance(self.args, list)
if valency is not None:
self.valency = valency
self.gas += self.add_gas_estimate
# the IR should be cached.
# TODO make this private. turns out usages are all for the caching
# idiom that cache_when_complex addresses
@property
def is_complex_ir(self):
# list of items not to cache. note can add other env variables
# which do not change, e.g. calldatasize, coinbase, etc.
do_not_cache = {"~empty"}
return (
isinstance(self.value, str)
and (self.value.lower() in VALID_IR_MACROS or self.value.upper() in get_ir_opcodes())
and self.value.lower() not in do_not_cache
)
@property
def is_literal(self):
return isinstance(self.value, int) or self.value == "multi"
@property
def is_pointer(self):
# not used yet but should help refactor/clarify downstream code
# eventually
return self.location is not None
# This function is slightly confusing but abstracts a common pattern:
# when an IR value needs to be computed once and then cached as an
# IR value (if it is expensive, or more importantly if its computation
# includes side-effects), cache it as an IR variable named with the
# `name` param, and execute the `body` with the cached value. Otherwise,
# run the `body` without caching the IR variable.
# Note that this may be an unneeded abstraction in the presence of an
# arbitrarily powerful optimization framework (which can detect unneeded
# caches) but for now still necessary - CMC 2021-12-11.
# usage:
# ```
# with ir_node.cache_when_complex("foo") as builder, foo:
# ret = some_function(foo)
# return builder.resolve(ret)
# ```
def cache_when_complex(self, name):
# this creates a magical block which maps to IR `with`
class _WithBuilder:
def __init__(self, ir_node, name):
# TODO figure out how to fix this circular import
from vyper.ir.optimizer import optimize
self.ir_node = ir_node
# for caching purposes, see if the ir_node will be optimized
# because a non-literal expr could turn into a literal,
# (e.g. `(add 1 2)`)
# TODO this could really be moved into optimizer.py
self.should_cache = optimize(ir_node).is_complex_ir
# a named IR variable which represents the
# output of `ir_node`
self.ir_var = IRnode.from_list(
name, typ=ir_node.typ, location=ir_node.location, encoding=ir_node.encoding
)
def __enter__(self):
if self.should_cache:
# return the named cache
return self, self.ir_var
else:
# it's a constant (or will be optimized to one), just return that
return self, self.ir_node
def __exit__(self, *args):
pass
# MUST be called at the end of building the expression
# in order to make sure the expression gets wrapped correctly
def resolve(self, body):
if self.should_cache:
ret = ["with", self.ir_var, self.ir_node, body]
if isinstance(body, IRnode):
return IRnode.from_list(
ret, typ=body.typ, location=body.location, encoding=body.encoding
)
else:
return ret
else:
return body
return _WithBuilder(self, name)
@cached_property
def contains_self_call(self):
return getattr(self, "is_self_call", False) or any(x.contains_self_call for x in self.args)
def __getitem__(self, i):
return self.to_list()[i]
def __len__(self):
return len(self.to_list())
# TODO this seems like a not useful and also confusing function
# check if dead code and remove - CMC 2021-12-13
def to_list(self):
return [self.value] + [a.to_list() for a in self.args]
def __eq__(self, other):
return (
self.value == other.value
and self.args == other.args
and self.typ == other.typ
and self.location == other.location
and self.source_pos == other.source_pos
and self.annotation == other.annotation
and self.mutable == other.mutable
and self.add_gas_estimate == other.add_gas_estimate
and self.valency == other.valency
)
@property
def repr_value(self):
if isinstance(self.value, int) and self.as_hex:
return hex(self.value)
if not isinstance(self.value, str):
return str(self.value)
return self.value
@staticmethod
def _colorise_keywords(val):
if val.lower() in VALID_IR_MACROS: # highlight macro
return OKLIGHTMAGENTA + val + ENDC
elif val.upper() in get_ir_opcodes().keys():
return OKMAGENTA + val + ENDC
return val
def repr(self) -> str:
if not len(self.args):
if self.annotation:
return f"{self.repr_value} " + OKLIGHTBLUE + f"<{self.annotation}>" + ENDC
else:
return str(self.repr_value)
# x = repr(self.to_list())
# if len(x) < 80:
# return x
o = ""
if self.annotation:
o += f"/* {self.annotation} */ \n"
if self.repr_show_gas and self.gas:
o += OKBLUE + "{" + ENDC + str(self.gas) + OKBLUE + "} " + ENDC # add gas for info.
o += "[" + self._colorise_keywords(self.repr_value)
prev_lineno = self.source_pos[0] if self.source_pos else None
arg_lineno = None
annotated = False
has_inner_newlines = False
for arg in self.args:
o += ",\n "
arg_lineno = arg.source_pos[0] if arg.source_pos else None
if arg_lineno is not None and arg_lineno != prev_lineno and self.value in ("seq", "if"):
o += f"# Line {(arg_lineno)}\n "
prev_lineno = arg_lineno
annotated = True
arg_repr = arg.repr()
if "\n" in arg_repr:
has_inner_newlines = True
sub = arg_repr.replace("\n", "\n ").strip(" ")
o += self._colorise_keywords(sub)
output = o.rstrip(" ") + "]"
output_on_one_line = re.sub(r",\n *", ", ", output).replace("\n", "")
should_output_single_line = (
(len(output_on_one_line) < 80 or len(self.args) == 1) and not annotated
) and not has_inner_newlines
if should_output_single_line:
return output_on_one_line
else:
return output
def __repr__(self):
return self.repr()
@classmethod
def from_list(
cls,
obj: Any,
typ: NodeType = None,
location: Optional[AddrSpace] = None,
source_pos: Optional[Tuple[int, int]] = None,
annotation: Optional[str] = None,
mutable: bool = True,
add_gas_estimate: int = 0,
valency: Optional[int] = None,
encoding: Encoding = Encoding.VYPER,
) -> "IRnode":
if isinstance(typ, str):
typ = BaseType(typ)
if isinstance(obj, IRnode):
# note: this modify-and-return clause is a little weird since
# the input gets modified. CC 20191121.
if typ is not None:
obj.typ = typ
if obj.source_pos is None:
obj.source_pos = source_pos
if obj.location is None:
obj.location = location
if obj.encoding is None:
obj.encoding = encoding
return obj
elif not isinstance(obj, list):
return cls(
obj,
[],
typ,
location=location,
annotation=annotation,
mutable=mutable,
add_gas_estimate=add_gas_estimate,
valency=valency,
encoding=encoding,
)
else:
return cls(
obj[0],
[cls.from_list(o, source_pos=source_pos) for o in obj[1:]],
typ,
location=location,
annotation=annotation,
mutable=mutable,
source_pos=source_pos,
add_gas_estimate=add_gas_estimate,
valency=valency,
encoding=encoding,
)
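# Editor's note (illustrative, not part of the original file): a minimal sketch
# of how the from_list() constructor above turns a nested list into an IR tree,
# assuming an installed vyper checkout that provides this module.
def _sketch_from_list():
    node = IRnode.from_list(["seq", ["mstore", 64, 0], ["mload", 64]])
    assert node.value == "seq"
    assert [a.value for a in node.args] == ["mstore", "mload"]
    assert node.valency == 1  # "seq" takes the valency of its last child (mload)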
|
PYSEC-2022-198
|
vyper/codegen/types/convert.py
|
@@ -32,7 +32,7 @@ def new_type_to_old_type(typ: new.BasePrimitive) -> old.NodeType:
if isinstance(typ, new.DynamicArrayDefinition):
return old.DArrayType(new_type_to_old_type(typ.value_type), typ.length)
if isinstance(typ, new.TupleDefinition):
- return old.TupleType(typ.value_type)
+ return old.TupleType([new_type_to_old_type(t) for t in typ.value_type])
if isinstance(typ, new.StructDefinition):
return old.StructType(
{n: new_type_to_old_type(t) for (n, t) in typ.members.items()}, typ._id
|
# transition module to convert from new types to old types
import vyper.codegen.types as old
import vyper.semantics.types as new
from vyper.exceptions import InvalidType
def new_type_to_old_type(typ: new.BasePrimitive) -> old.NodeType:
if isinstance(typ, new.BoolDefinition):
return old.BaseType("bool")
if isinstance(typ, new.AddressDefinition):
return old.BaseType("address")
if isinstance(typ, new.InterfaceDefinition):
return old.InterfaceType(typ._id)
if isinstance(typ, new.BytesMDefinition):
m = typ._length # type: ignore
return old.BaseType(f"bytes{m}")
if isinstance(typ, new.BytesArrayDefinition):
return old.ByteArrayType(typ.length)
if isinstance(typ, new.StringDefinition):
return old.StringType(typ.length)
if isinstance(typ, new.DecimalDefinition):
return old.BaseType("decimal")
if isinstance(typ, new.SignedIntegerAbstractType):
bits = typ._bits # type: ignore
return old.BaseType("int" + str(bits))
if isinstance(typ, new.UnsignedIntegerAbstractType):
bits = typ._bits # type: ignore
return old.BaseType("uint" + str(bits))
if isinstance(typ, new.ArrayDefinition):
return old.SArrayType(new_type_to_old_type(typ.value_type), typ.length)
if isinstance(typ, new.DynamicArrayDefinition):
return old.DArrayType(new_type_to_old_type(typ.value_type), typ.length)
if isinstance(typ, new.TupleDefinition):
return old.TupleType(typ.value_type)
if isinstance(typ, new.StructDefinition):
return old.StructType(
{n: new_type_to_old_type(t) for (n, t) in typ.members.items()}, typ._id
)
raise InvalidType(f"unknown type {typ}")
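# Editor's note (not part of the original file): the one-line patch above matters
# because old.TupleType expects a list of *old-style* member types. Before the
# fix, `old.TupleType(typ.value_type)` handed the new-style member definitions
# straight to the old-style codegen layer, so tuple members were never actually
# converted; the fix maps new_type_to_old_type over each member, consistent with
# the recursive handling of arrays, dynamic arrays and structs above.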
|
PYSEC-2022-198
|
tensorflow/python/ops/image_ops_test.py
|
@@ -4175,6 +4175,25 @@ def testPad(self):
self._assertReturns(x, x_shape, y, y_shape)
+class ResizeNearestNeighborGrad(test_util.TensorFlowTestCase):
+
+ def testSizeTooLarge(self):
+ align_corners = True
+ half_pixel_centers = False
+ grads = constant_op.constant(1, shape=[1, 8, 16, 3], dtype=dtypes.float16)
+ size = constant_op.constant([1879048192, 1879048192],
+ shape=[2],
+ dtype=dtypes.int32)
+ with self.assertRaisesRegex(errors.InvalidArgumentError,
+ r"Encountered overflow when multiplying"):
+ self.evaluate(
+ gen_image_ops.ResizeNearestNeighborGrad(
+ grads=grads,
+ size=size,
+ align_corners=align_corners,
+ half_pixel_centers=half_pixel_centers))
+
+
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
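# Editor's note (not part of the original diff): the size constant in the new
# test is 1879048192 = 0x70000000, so the requested output grid alone has
# 1879048192 * 1879048192 = 3,530,822,107,858,468,864 elements (~3.5e18), far
# beyond INT32_MAX (2,147,483,647); multiplied by the 3 channels it exceeds
# INT64_MAX (~9.22e18) as well. The test asserts that the grad op detects this
# and fails with "Encountered overflow when multiplying" rather than computing
# a bogus buffer size.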
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
import colorsys
import contextlib
import functools
import itertools
import math
import os
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config as tf_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session():
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
def testRGBToHSVDataTypes(self):
# Test case for GitHub issue 54855.
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for dtype in [
dtypes.float32, dtypes.float64, dtypes.float16, dtypes.bfloat16
]:
with self.cached_session(use_gpu=False):
rgb = math_ops.cast(
np.array(data, np.float32).reshape([2, 2, 3]) / 255., dtype=dtype)
hsv = image_ops.rgb_to_hsv(rgb)
val = image_ops.hsv_to_rgb(hsv)
out = self.evaluate(val)
self.assertAllClose(rgb, out, atol=1e-2)
class RGBToYIQTest(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yiq and yiq_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yuv and yuv_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in range(images.shape[0]):
for y in range(images.shape[1]):
for x in range(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testGrayscaleToRGBInputValidation(self):
# tests whether the grayscale_to_rgb function raises
# an exception if the input images' last dimension is
# not of size 1, i.e. the images have shape
# [batch size, height, width] or [height, width]
# tests if an exception is raised if a three dimensional
# input is used, i.e. the images have shape [batch size, height, width]
with self.cached_session():
# 3-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "Last dimension of a grayscale image should be size 1"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
# tests if an exception is raised if a one dimensional
# input is used, i.e. the images have shape [width]
with self.cached_session():
# 1-D input without batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "must be at least two-dimensional"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.cached_session():
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.cached_session():
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.cached_session():
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.cached_session():
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_less_zero_float32(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_gamma(x, gamma=-1)
def test_adjust_gamma_less_zero_uint8(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_gamma(x, gamma=-1)
def test_adjust_gamma_less_zero_tensor(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = constant_op.constant(-1.0, dtype=dtypes.float32)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image = image_ops.adjust_gamma(x, gamma=y)
self.evaluate(image)
def _test_adjust_gamma_uint8(self, gamma):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = np.trunc(self.evaluate(y))
# calculate gamma correction using numpy
# firstly, transform uint8 to float representation
# then perform correction
y_np = np.power(x_np / 255.0, gamma)
# convert correct numpy image back to uint8 type
y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))
self.assertAllClose(y_tf, y_np, 1e-6)
def _test_adjust_gamma_float32(self, gamma):
"""Verifying the output with expected results for gamma
correction for float32 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 1.0, (8, 8))
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = self.evaluate(y)
y_np = np.clip(np.power(x_np, gamma), 0, 1.0)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_one_float32(self):
"""Same image should be returned for gamma equal to one"""
self._test_adjust_gamma_float32(1.0)
def test_adjust_gamma_one_uint8(self):
self._test_adjust_gamma_uint8(1.0)
def test_adjust_gamma_zero_uint8(self):
"""White image should be returned for gamma equal
to zero for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.0)
def test_adjust_gamma_less_one_uint8(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.5)
def test_adjust_gamma_greater_one_uint8(self):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=1.0)
def test_adjust_gamma_less_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for float32 images
"""
self._test_adjust_gamma_float32(0.5)
def test_adjust_gamma_greater_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two for float32 images
"""
self._test_adjust_gamma_float32(2.0)
def test_adjust_gamma_zero_float32(self):
"""White image should be returned for gamma equal
to zero for float32 images
"""
self._test_adjust_gamma_float32(0.0)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in range(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
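# adding 10.0 before fmod keeps the argument non-negative (h + delta_h can be
# as low as -1), so the result wraps cleanly into [0, 1)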
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.cached_session():
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = self.evaluate(y)
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
fused = False
with self.assertRaisesRegex(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
def testInvalidDeltaValue(self):
"""Delta value must be in the inetrval of [-1,1]."""
if not context.executing_eagerly():
self.skipTest("Eager mode only")
else:
with self.cached_session():
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = r"delta must be in the interval \[-1, 1\]"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_hue(x, delta=1.5)
class FlipImageBenchmark(test.Benchmark):
def _benchmarkFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
image_shape = [16, 299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
"%.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkFlipLeftRightCpu1(self):
self._benchmarkFlipLeftRight("/cpu:0", 1)
def benchmarkFlipLeftRightCpuAll(self):
self._benchmarkFlipLeftRight("/cpu:0", None)
def benchmarkFlipLeftRightGpu(self):
self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
def benchmarkRandomFlipLeftRightCpu1(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
def benchmarkRandomFlipLeftRightCpuAll(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", None)
def benchmarkRandomFlipLeftRightGpu(self):
self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
def benchmarkBatchedRandomFlipLeftRightCpu1(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)
def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)
def benchmarkBatchedRandomFlipLeftRightGpu(self):
self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for _ in range(warmup_rounds):
self.evaluate(run_op)
start = time.time()
for _ in range(benchmark_rounds):
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in range(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in range(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in range(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchSaturation(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in range(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.cached_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self.evaluate(image_ops.adjust_saturation(x_np, scale))
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRightStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
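        # (Flip count is Binomial(n=100, p=0.5): stddev = sqrt(100 * 0.5 * 0.5) = 5.)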
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
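      # Each call uses a fixed op-level seed, so the flip decisions (and the
      # exact counts asserted below) are deterministic.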
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down),
)
def testRandomFlipStateless(self, func):
with test_util.use_gpu():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
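      # The same ten per-call seeds are reused in every iteration, so the flip
      # decisions must be identical across iterations.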
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
if y_tf_eval[0][0] == 1:
self.assertAllEqual(y_tf_eval, x_np)
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval, y_np)
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
# Verify that results are deterministic.
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down)
)
def testRandomFlipStatelessWithBatch(self, func):
with test_util.use_gpu():
batch_size = 16
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([1, 2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
for j in range(batch_size):
if y_tf_eval[j][0][0] == 1:
self.assertAllEqual(y_tf_eval[j], x_np[j])
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval[j], y_np[j])
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDownStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
      # Ops that support 3D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertIsNone(transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegex(ValueError, "must be > 0"):
op(p_zero_dim)
      # Ops that support 4D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegex(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
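    # Applying rot90 four times is the identity: the rotation group has order 4.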
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
rotated = image
for _ in range(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
rotated = image
for _ in range(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
for k in range(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
for k in range(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testFlipImageUnknownShape(self):
expected_output = constant_op.constant([[[[3, 4, 5], [0, 1, 2]],
[[9, 10, 11], [6, 7, 8]]]])
def generator():
image_input = np.array(
[[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], np.int32)
yield image_input
dataset = dataset_ops.Dataset.from_generator(
generator,
output_types=dtypes.int32,
output_shapes=tensor_shape.TensorShape([1, 2, 2, 3]))
dataset = dataset.map(image_ops.flip_left_right)
image_flipped_via_dataset_map = get_single_element.get_single_element(
dataset.take(1))
self.assertAllEqual(image_flipped_via_dataset_map, expected_output)
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
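    # Reference: contrast adjustment moves each pixel towards the per-image,
    # per-channel spatial mean, y = mean + contrast_factor * (x - mean).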
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session():
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"contrast_factor must be scalar|"
"Shape must be rank 0 but is rank 1"):
image_ops.adjust_contrast(x_np, [2.0])
@test_util.run_in_graph_and_eager_modes
def testDeterminismUnimplementedExceptionThrowing(self):
"""Test d9m-unimplemented exception-throwing when op-determinism is enabled.
This test depends upon other tests, tests which do not enable
op-determinism, to ensure that determinism-unimplemented exceptions are not
erroneously thrown when op-determinism is not enabled.
"""
if test_util.is_xla_enabled():
self.skipTest('XLA implementation does not raise exception')
with self.session(), test_util.deterministic_ops():
input_shape = (1, 2, 2, 1)
on_gpu = len(tf_config.list_physical_devices("GPU"))
# AdjustContrast seems to now be inaccessible via the Python API.
# AdjustContrastv2 only supports float16 and float32 on GPU, and other
# types are converted to and from float32 at the Python level before
# AdjustContrastv2 is called.
dtypes_to_test = [
dtypes.uint8, dtypes.int8, dtypes.int16, dtypes.int32, dtypes.float32,
dtypes.float64
]
if on_gpu:
dtypes_to_test.append(dtypes.float16)
ctx_mgr = self.assertRaisesRegex(
errors.UnimplementedError,
"A deterministic GPU implementation of AdjustContrastv2 is not" +
" currently available.")
else:
ctx_mgr = contextlib.suppress()
for dtype in dtypes_to_test:
input_images = array_ops.zeros(input_shape, dtype=dtype)
contrast_factor = 1.
with ctx_mgr:
output_images = image_ops.adjust_contrast(input_images,
contrast_factor)
self.evaluate(output_images)
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
with self.cached_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, tol)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat32(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
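    # Float inputs are not saturated, so the expected values below include
    # 255 + 10 = 265 (scaled by 1/255 like the input).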
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat16(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _NumpyPerImageWhitening(self, x):
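    # Reference: subtract the mean and divide by the adjusted stddev,
    # max(stddev, 1/sqrt(num_pixels)), which protects against division by zero
    # for constant images.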
num_pixels = np.prod(x.shape)
mn = np.mean(x)
std = np.std(x)
stddev = max(std, 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
@parameterized.named_parameters([("_int8", np.int8), ("_int16", np.int16),
("_int32", np.int32), ("_int64", np.int64),
("_uint8", np.uint8), ("_uint16", np.uint16),
("_uint32", np.uint32),
("_uint64", np.uint64),
("_float32", np.float32)])
def testBasic(self, data_type):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=data_type).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.cached_session():
x = constant_op.constant(x_np, dtype=data_type, shape=x_shape)
y = image_ops.per_image_standardization(x)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.cached_session():
whiten_np = self.evaluate(whiten)
self.assertFalse(np.any(np.isnan(whiten_np)))
def testBatchWhitening(self):
imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
with self.cached_session():
imgs = constant_op.constant(imgs_np)
whiten = image_ops.per_image_standardization(imgs)
whiten_tf = self.evaluate(whiten)
for w_tf, w_np in zip(whiten_tf, whiten_np):
self.assertAllClose(w_tf, w_np, atol=1e-4)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
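    # Helper that exercises both Python scalars and tensors for the offset and
    # target arguments before cropping and evaluating the result.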
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
with self.cached_session():
return self.evaluate(y)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
test_config = (
([-1, 0, 3, 3], "offset_height must be >= 0"),
([0, -1, 3, 3], "offset_width must be >= 0"),
([0, 0, 0, 3], "target_height must be > 0"),
([0, 0, 3, 0], "target_width must be > 0"),
([2, 0, 3, 3], r"height must be >= target \+ offset"),
([0, 2, 3, 3], r"width must be >= target \+ offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testCropping2(self):
    # Test case for GitHub issue 10315.
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y_tf = self.evaluate(image_ops.central_crop(x_np, 0.33))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
      # Test fraction=0.5, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
      # Test fraction=0.5, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
def testCentralFractionTensor(self):
# Test case for GitHub issue 45324.
x_shape = [240, 320, 3]
y_shape = [80, 106, 3]
@def_function.function(autograph=False)
def f(x, central_fraction):
return image_ops.central_crop(x, central_fraction)
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
y_tf = self.evaluate(f(x_np, constant_op.constant(0.33)))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
class PadToBoundingBoxTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def pad_bbox(*args):
return image_ops.pad_to_bounding_box(*args)
with self.cached_session():
return self.evaluate(pad_bbox(x_tensor, offset_height, offset_width,
target_height, target_width))
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
with self.cached_session():
self.assertAllClose(y, self.evaluate(y_tf))
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
# The original error message does not contain back slashes. However, they
# are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParamsScalarInputs(self):
# In this test, inputs do not get converted to tensors before calling the
# tf.function. The error message here is raised in python
# since the python function has direct access to the scalars.
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = (
(-1, 0, 4, 4,
"offset_height must be >= 0"),
(0, -1, 4, 4,
"offset_width must be >= 0"),
(2, 0, 4, 4,
"height must be <= target - offset"),
(0, 2, 4, 4,
"width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(
x, x_shape, *config_item, use_tensor_inputs_options=[False])
def testBadParamsTensorInputsEager(self):
# In this test inputs get converted to EagerTensors before calling the
# tf.function. The error message here is raised in python
# since the python function has direct access to the tensor's values.
with context.eager_mode():
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = (
(-1, 0, 4, 4,
"offset_height must be >= 0"),
(0, -1, 4, 4,
"offset_width must be >= 0"),
(2, 0, 4, 4,
"height must be <= target - offset"),
(0, 2, 4, 4,
"width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(
x, x_shape, *config_item, use_tensor_inputs_options=[True])
@parameterized.named_parameters([("OffsetHeight", (-1, 0, 4, 4)),
("OffsetWidth", (0, -1, 4, 4)),
("Height", (2, 0, 4, 4)),
("Width", (0, 2, 4, 4))])
def testBadParamsTensorInputsGraph(self, config):
# In this test inputs get converted to tensors before calling the
# tf.function. The error message here is raised during shape inference.
with context.graph_mode():
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
self._assertRaises(
x,
x_shape,
*config,
"Paddings must be non-negative",
use_tensor_inputs_options=[True])
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
def testInvalidInput(self):
# Test case for GitHub issue 46890.
if test_util.is_xla_enabled():
# TODO(b/200850176): test fails with XLA.
return
with self.session():
with self.assertRaises(errors_impl.InvalidArgumentError):
v = image_ops.pad_to_bounding_box(
image=np.ones((1, 1, 1)),
target_height=5191549470,
target_width=5191549470,
offset_height=1,
offset_width=1)
self.evaluate(v)
class ImageProjectiveTransformV2(test_util.TensorFlowTestCase):
def testShapeTooLarge(self):
interpolation = "BILINEAR"
fill_mode = "REFLECT"
images = constant_op.constant(
0.184634328, shape=[2, 5, 8, 3], dtype=dtypes.float32)
transforms = constant_op.constant(
0.378575385, shape=[2, 8], dtype=dtypes.float32)
output_shape = constant_op.constant([1879048192, 1879048192],
shape=[2],
dtype=dtypes.int32)
with self.assertRaisesRegex(errors.InvalidArgumentError,
r"Encountered overflow when multiplying"):
self.evaluate(
gen_image_ops.ImageProjectiveTransformV2(
images=images,
transforms=transforms,
output_shape=output_shape,
interpolation=interpolation,
fill_mode=fill_mode))
class InternalPadToBoundingBoxTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _InternalPadToBoundingBox(self, x, offset_height, offset_width,
target_height, target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def pad_bbox(*args):
return image_ops.pad_to_bounding_box_internal(*args, check_dims=False)
with self.cached_session():
return self.evaluate(
pad_bbox(x_tensor, offset_height, offset_width, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._InternalPadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box_internal(
image, 0, 0, height, width, check_dims=False)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box_internal(
x, i[0], i[1], i[2], i[3], check_dims=False)
with self.cached_session():
self.assertAllClose(y, self.evaluate(y_tf))
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box_internal(
image, 0, 0, 55, 66, check_dims=False)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.cached_session():
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in range(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# min_object_covered as tensor
min_object_covered_t = ops.convert_to_tensor(min_object_covered)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered_t,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in range(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
# a stricter bound. Or, ideally, the sample_distorted_bounding_box Op
# be fixed to not use rejection sampling and generate correctly uniform
# aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
    # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
# By default min_object_covered=0.1 if not provided
with self.cached_session():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
def _testStatelessSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered,
aspect_ratio_range, area_range):
with test_util.use_gpu():
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
iterations = 2
test_seeds = [(1, 2), (3, 4), (5, 6)]
for seed in test_seeds:
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
for _ in range(iterations):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(bounding_box_np,
dtype=dtypes.float32,
shape=bounding_box_np.shape)
begin, size, _ = image_ops.stateless_sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
seed=seed,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratio = area / original_area
area_ratios.append(area_ratio)
fraction_object_covered.append(
float(np.sum(y_tf)) / bounding_box_area)
# Check that `area_ratio` is within valid range.
self.assertLessEqual(area_ratio, area_range[1])
self.assertGreaterEqual(area_ratio, area_range[0])
# Each array should consist of one value just repeated `iteration` times
# because the same seed is used.
self.assertEqual(len(set(aspect_ratios)), 1)
self.assertEqual(len(set(area_ratios)), 1)
self.assertEqual(len(set(fraction_object_covered)), 1)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testWholeImageBoundingBoxStateless(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
for min_obj_covered in [0.1, constant_op.constant(0.1)]:
self._testStatelessSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=min_obj_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testWithBoundingBoxStateless(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
# Test both scalar and tensor input for `min_object_covered`.
for min_obj_covered in [0.1, constant_op.constant(0.1)]:
self._testStatelessSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_obj_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShapeStateless(self):
with test_util.use_gpu():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
bbox_func = functools.partial(
image_ops.stateless_sample_distorted_bounding_box,
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Check error is raised with wrong seed shapes.
for seed in [1, (1, 2, 3)]:
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
begin, end, bbox_for_drawing = bbox_func(seed=seed)
test_seed = (1, 2)
begin, end, bbox_for_drawing = bbox_func(seed=test_seed)
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
self.assertAllEqual([3], begin.shape)
self.assertAllEqual([3], end.shape)
self.assertAllEqual([1, 1, 4], bbox_for_drawing.shape)
def testDeterminismExceptionThrowing(self):
with test_util.deterministic_ops():
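      # Calling the sampling ops without an explicit non-zero seed must fail
      # while determinism is enabled.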
with self.assertRaisesRegex(
ValueError, "requires a non-zero seed to be passed in when "
"determinism is enabled"):
image_ops_impl.sample_distorted_bounding_box_v2(
image_size=[50, 50, 1],
bounding_boxes=[[[0., 0., 1., 1.]]],
)
image_ops_impl.sample_distorted_bounding_box_v2(
image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], seed=1)
with self.assertRaisesRegex(
ValueError, 'requires "seed" or "seed2" to be non-zero when '
"determinism is enabled"):
image_ops_impl.sample_distorted_bounding_box(
image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]])
image_ops_impl.sample_distorted_bounding_box(
image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], seed=1)
class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
]
  # Some resize methods, such as Gaussian, are non-interpolating in that they
  # change the image even when there is no scale change. For some tests we
  # only check values on the value-preserving (interpolating) methods.
INTERPOLATING_METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
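  # Decides whether a given (method, dtype) combination should also be
  # exercised on GPU.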
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
# half_pixel_centers unsupported in ResizeBilinear
@test_util.disable_xla("b/127616992")
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images_v2(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethod.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2],
[
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
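      # Nearest-neighbor preserves the input dtype; all other methods are
      # expected to return float32.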
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images_v2(image, target_shape, method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images_v2(image, target_shape, target_method)
        if target_method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(
image, [target_height, target_width], method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
87.5, 100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.LANCZOS3] = [
75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
]
expected_data[image_ops.ResizeMethod.LANCZOS5] = [
77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
]
expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
]
expected_data[image_ops.ResizeMethod.BICUBIC] = [
70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
]
expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
]
for nptype in self.TYPES:
for method in expected_data:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-04)
# XLA doesn't implement half_pixel_centers
@test_util.disable_xla("b/127616992")
def testLegacyBicubicMethodsMatchNewMethods(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
(gen_image_ops.resize_bicubic, "keyscubic"))
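    # With half_pixel_centers=True, each legacy op should match
    # scale_and_translate using the corresponding kernel with antialias off.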
for legacy_method, new_method in methods_to_test:
with self.cached_session():
img_np = np.array(data, dtype=np.float32).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
legacy_result = legacy_method(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
half_pixel_centers=True)
scale = (
constant_op.constant([target_height, target_width],
dtype=dtypes.float32) /
math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
new_result = gen_image_ops.scale_and_translate(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
scale,
array_ops.zeros([2]),
kernel_type=new_method,
antialias=False)
self.assertAllClose(
self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
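    # CPU and GPU nearest-neighbor resize should produce matching results.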
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session():
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testBfloat16MultipleOps(self):
target_height = 8
target_width = 12
img = np.random.uniform(0, 100, size=(30, 10, 2)).astype(np.float32)
img_bf16 = ops.convert_to_tensor(img, dtype="bfloat16")
new_size = constant_op.constant([target_height, target_width])
img_methods = [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA
]
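    # Resizing in bfloat16 should closely match resizing the same image in
    # float32 for every method tested.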
for method in img_methods:
out_op_bf16 = image_ops.resize_images_v2(img_bf16, new_size, method)
out_op_f32 = image_ops.resize_images_v2(img, new_size, method)
bf16_val = self.evaluate(out_op_bf16)
f32_val = self.evaluate(out_op_f32)
self.assertAllClose(bf16_val, f32_val, rtol=1e-2, atol=1e-2)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = (max_h, max_w)
x_tensor = x
def resize_func(t,
target_max=target_max,
preserve_aspect_ratio=preserve_aspect_ratio):
return image_ops.resize_images(
t, ops.convert_to_tensor(target_max),
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session():
return self.evaluate(resize_func(x_tensor))
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 80, 10]
x = np.random.uniform(size=x_shape)
for preserve_aspect_ratio in [True, False]:
with self.subTest(preserve_aspect_ratio=preserve_aspect_ratio):
expect_shape = [10, 250, 200, 10] if preserve_aspect_ratio \
else [10, 250, 250, 10]
self._assertResizeCheckShape(
x,
x_shape, [250, 250],
expect_shape,
preserve_aspect_ratio=preserve_aspect_ratio)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
def testLargeDim(self):
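    # Requesting an extremely large output size must raise
    # InvalidArgumentError instead of attempting the resize.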
with self.session():
with self.assertRaises(errors.InvalidArgumentError):
x = np.ones((5, 1, 1, 2))
v = image_ops.resize_images_v2(x, [1610637938, 1610637938],
image_ops.ResizeMethod.BILINEAR)
_ = self.evaluate(v)
class ResizeImagesTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
METHODS = [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethodV1.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2], [
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, method)
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images(image, target_shape, target_method)
        if (target_method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
      with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=False)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
img_shape = [1, 3, 2, 1]
data = [6, 3, 3, 6, 6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
6.5, 7.5, 6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
9.0, 9.0, 6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethodV1.AREA] = [
6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
3.0, 6.0, 6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=True)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [
128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [
128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.BICUBIC)
resized = self.evaluate(y)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session():
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.BILINEAR,
align_corners=align_corners)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
img_shape = [1, 3, 2, 1]
with self.cached_session():
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = [max_h, max_w]
x_tensor = x
y = image_ops.resize_images(
x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session():
return self.evaluate(y)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
with self.cached_session():
return self.evaluate(
image_ops.resize_image_with_pad_v1(x_tensor, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v1(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test with 3-D tensors.
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
# Test with 4-D tensors.
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, None, None, None], 55, 66,
[5, 55, 66, None])
self._assertShapeInference([None, None, None, None], 55, 66,
[None, 55, 66, None])
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
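    # resize_image_with_pad resizes to fit within the target size while
    # preserving aspect ratio, then zero-pads to the target size.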
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 3, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# half_pixel_centers not supported by XLA
@test_util.for_all_test_methods(test_util.disable_xla, "b/127616992")
class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
with self.cached_session():
return self.evaluate(
image_ops.resize_image_with_pad_v2(x_tensor, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.resize_image_with_pad_v2(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test with 3-D tensors.
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
# Test with 4-D tensors.
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, None, None, None], 55, 66,
[5, 55, 66, None])
self._assertShapeInference([None, None, None, None], 55, 66,
[None, 55, 66, None])
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
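    # resize_image_with_pad resizes to fit within the target size while
    # preserving aspect ratio, then zero-pads to the target size.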
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 3.5, 5.5, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def resize_crop_or_pad(*args):
return image_ops.resize_image_with_crop_or_pad(*args)
with self.cached_session():
return self.evaluate(
resize_crop_or_pad(x_tensor, target_height, target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3, 6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4, 8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [3, 4, 5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
x_shape = [8, 2, 1]
y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 2, 3, 6, 7, 0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0, 0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"inner 3 dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def simple_color_ramp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
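    # Mean absolute per-pixel difference between the two images.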
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session():
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.cached_session():
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = self.evaluate([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
with self.cached_session() as sess:
# Read a real jpeg, then decode and crop it.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
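# Each crop window is given as [crop_y, crop_x, crop_height, crop_width].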
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window, channels=3)
# Combined decode+crop should have the same shape inference on image
# sizes.
image1_shape = image1_crop.get_shape().as_list()
image2_shape = image2.get_shape().as_list()
self.assertAllEqual(image1_shape, image2_shape)
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = self.evaluate([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
def testCropAndDecodeJpegWithInvalidCropWindow(self):
with self.cached_session() as sess:
# Read a real jpeg and try to crop it with invalid windows.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Invalid JPEG data or crop window"):
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
self.evaluate(result)
def testSynthetic(self):
with self.cached_session():
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.cached_session():
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.cached_session():
# Compare decoding with both dct_option=INTEGER_FAST and
# default. They should be the same.
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = self.evaluate([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session():
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
self.assertAllEqual(image_shape, [256, 128, 3])
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.cached_session():
jpeg = io_ops.read_file(path)
image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
# Cmyk jpeg image has 4 channels.
self.assertAllEqual(image_shape, [256, 128, 4])
def testRandomJpegQuality(self):
# Previous implementation of random_jpeg_quality had a bug.
# This unit test exercises the fixed version, but due to forward compatibility
# it can only run when the fixed version is in use.
# Test jpeg quality dynamic randomization.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
with self.cached_session() as sess:
# Test randomization.
random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
are_images_equal = []
for i in range(1, len(random_jpeg_images)):
# Most of them should be different if randomization is occurring
# correctly.
are_images_equal.append(
np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))
self.assertFalse(all(are_images_equal))
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testStatelessRandomJpegQuality(self):
# Test deterministic randomness in jpeg quality by checking that the same
# sequence of jpeg quality adjustments are returned each round given the
# same seed.
with test_util.use_gpu():
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
jpeg_quality = (40, 100)
seeds_list = [(1, 2), (3, 4)]
iterations = 2
random_jpeg_images_all = [[] for _ in range(iterations)]
for random_jpeg_images in random_jpeg_images_all:
for seed in seeds_list:
distorted_jpeg = image_ops.stateless_random_jpeg_quality(
image, jpeg_quality[0], jpeg_quality[1], seed=seed)
# Verify that the random jpeg image is different from the original
# jpeg image.
self.assertNotAllEqual(image, distorted_jpeg)
random_jpeg_images.append(self.evaluate(distorted_jpeg))
# Verify that the results are identical given the same seed.
for i in range(1, iterations):
self.assertAllEqual(random_jpeg_images_all[0],
random_jpeg_images_all[i])
def testAdjustJpegQuality(self):
# Test that image_ops.adjust_jpeg_quality works when jpeg quality
# is an int (not a tensor), for backward compatibility.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
jpeg_quality = np.random.randint(40, 100)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(
image, jpeg_quality)
with self.cached_session() as sess:
sess.run(adjust_jpeg_quality_image)
def testAdjustJpegQualityShape(self):
with self.cached_session():
image = constant_op.constant(
np.arange(24, dtype=np.uint8).reshape([2, 4, 3]))
adjusted_image = image_ops.adjust_jpeg_quality(image, 80)
adjusted_image.shape.assert_is_compatible_with([None, None, 3])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
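# channels=0 keeps the number of channels stored in the file itself.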
with self.cached_session():
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = self.evaluate([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, self.evaluate(image1))
def testSynthetic(self):
with self.cached_session():
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.cached_session():
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.cached_session():
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.cached_session():
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def _testValid(self, filename):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
with self.cached_session():
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = self.evaluate([gif0, image0])
self.assertEqual(image0.shape, shape)
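# Ground truth: a white (255) stripe, STRIDE pixels wide, that advances by
# STRIDE each frame, scanning across the columns first and then down the rows.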
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
print(frame_idx)
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testValid(self):
self._testValid("scan.gif")
self._testValid("optimized.gif")
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
def testAnimatedGif(self):
# Test that all frames in the animated GIF file are properly decoded.
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif = io_ops.read_file(os.path.join(base, "pendulum_sm.gif"))
gt_frame0 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame0.png"))
gt_frame1 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame1.png"))
gt_frame2 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame2.png"))
image = image_ops.decode_gif(gif)
frame0 = image_ops.decode_png(gt_frame0)
frame1 = image_ops.decode_png(gt_frame1)
frame2 = image_ops.decode_png(gt_frame2)
image, frame0, frame1, frame2 = self.evaluate([image, frame0, frame1,
frame2])
# Compare decoded gif frames with ground-truth data.
self.assertAllEqual(image[0], frame0)
self.assertAllEqual(image[1], frame1)
self.assertAllEqual(image[2], frame2)
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.cached_session():
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y, y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate, y_np, atol=1e-5)
def testNoConvert(self):
# Tests with Tensor.op require a graph.
with ops.Graph().as_default():
# Make sure converting to the same data type creates only an identity op
with self.cached_session():
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEqual(y.op.type, "Identity")
self.assertEqual(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
# Make sure converting between integer types scales appropriately
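# Integer-to-integer conversion scales by the ratio of the two types' value
# ranges, e.g. a factor of 128 between uint8 (max 255) and int16 (max 32767).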
with self.cached_session():
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
def testConvertBetweenFloat(self):
# Make sure converting between float types does nothing interesting
with self.cached_session():
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
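# Integer images are scaled into [0, 1] by dividing by the source type's max
# value (255 for uint8); the reverse conversion multiplies by the target max
# before casting.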
with self.cached_session():
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.cached_session():
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be float-numbers so we only test
for approximate equality within some narrow error-bound.
"""
# Create a TensorFlow session.
with self.cached_session():
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = self.evaluate(y)
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum for all axis.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum for the last 3 axis.
sum_axis = (1, 2, 3)
else:
# This should not occur in this test-code.
pass
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
# TODO(b/133851381): re-enable this test.
def disabledtestTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
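# total_variation sums the absolute differences between vertically and
# horizontally neighboring pixel values over all channels.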
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2], [4, 7]]
# Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18], [29, 47]]
# Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120], [193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var) # pylint: disable=invalid-unary-operand-type
# Scale the pixel-values by a float. This scales the total variation as
# well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
def testFormats(self):
prefix = "tensorflow/core/lib"
paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
"gif/testdata/lena.gif")
decoders = {
"jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
"png": functools.partial(image_ops.decode_png, channels=3),
"gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
}
with self.cached_session():
for path in paths:
contents = self.evaluate(io_ops.read_file(os.path.join(prefix, path)))
images = {}
for name, decode in decoders.items():
image = self.evaluate(decode(contents))
self.assertEqual(image.ndim, 3)
for prev_name, prev in images.items():
print("path %s, names %s %s, shapes %s %s" %
(path, name, prev_name, image.shape, prev.shape))
self.assertAllEqual(image, prev)
images[name] = image
def testError(self):
path = "tensorflow/core/lib/gif/testdata/scan.gif"
with self.cached_session():
for decode in image_ops.decode_jpeg, image_ops.decode_png:
with self.assertRaisesOpError(r"Got 12 frames"):
decode(io_ops.read_file(path)).eval()
class CombinedNonMaxSuppressionTest(test_util.TensorFlowTestCase):
# NOTE(b/142795960): parameterized tests do not work well with tf.tensor
# inputs. Due to failures, creating another test `testInvalidTensorInput`
# which is identical to this one except that the input here is a scalar as
# opposed to a tensor.
def testInvalidPyInput(self):
boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
max_output_size_per_class = 5
max_total_size = 2**31
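# 2**31 does not fit in int32 (max 2**31 - 1), so converting it to the op's
# expected int32 input fails with the error matched below.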
with self.assertRaisesRegex(
(TypeError, ValueError),
"type int64 that does not match expected type of int32|"
"Tensor conversion requested dtype int32 for Tensor with dtype int64"):
image_ops.combined_non_max_suppression(
boxes=boxes_np,
scores=scores_np,
max_output_size_per_class=max_output_size_per_class,
max_total_size=max_total_size)
# NOTE(b/142795960): parameterized tests do not work well with tf.tensor
# inputs. Due to failures, creating this separate test, which is identical to
# `testInvalidPyInput` except that the input is a tensor here as opposed
# to a scalar.
def testInvalidTensorInput(self):
boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
max_output_size_per_class = 5
max_total_size = ops.convert_to_tensor(2**31)
with self.assertRaisesRegex(
(TypeError, ValueError),
"type int64 that does not match expected type of int32|"
"Tensor conversion requested dtype int32 for Tensor with dtype int64"):
image_ops.combined_non_max_suppression(
boxes=boxes_np,
scores=scores_np,
max_output_size_per_class=max_output_size_per_class,
max_total_size=max_total_size)
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
def testNonMaxSuppression(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
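# The six boxes form three overlapping clusters; NMS keeps the highest-scoring
# box of each cluster, i.e. indices 3 (0.95), 0 (0.9) and 5 (0.3).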
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices, [3, 0, 5])
def testInvalidShape(self):
def nms_func(box, score, max_output_size, iou_thres):
return image_ops.non_max_suppression(box, score, max_output_size,
iou_thres)
max_output_size = 3
iou_thres = 0.5
# The boxes should be 2D of shape [num_boxes, 4].
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, iou_thres)
# The box dimension must be 4 (but is 3 here).
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, iou_thres)
# The boxes are of shape [num_boxes, 4] and the scores of shape
# [num_boxes], so an error is thrown because 1 != 2.
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, iou_thres)
# The scores should be 1D of shape [num_boxes].
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([[0.9]])
nms_func(boxes, scores, max_output_size, iou_thres)
# The max output size should be a scalar (0-D).
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, [[max_output_size]], iou_thres)
# The iou_threshold should be a scalar (0-D).
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, [[iou_thres]])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testTensors(self):
with context.eager_mode():
boxes_tensor = constant_op.constant([[6.625, 6.688, 272., 158.5],
[6.625, 6.75, 270.5, 158.4],
[5.375, 5., 272., 157.5]])
scores_tensor = constant_op.constant([0.84, 0.7944, 0.7715])
max_output_size = 100
iou_threshold = 0.5
score_threshold = 0.3
soft_nms_sigma = 0.25
pad_to_max_output_size = False
# gen_image_ops.non_max_suppression_v5.
for dtype in [np.float16, np.float32]:
boxes = math_ops.cast(boxes_tensor, dtype=dtype)
scores = math_ops.cast(scores_tensor, dtype=dtype)
_, _, num_selected = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma, pad_to_max_output_size)
self.assertEqual(num_selected.numpy(), 1)
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testDataTypes(self):
# Test case for GitHub issue 20199.
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = float("-inf")
# Note: There are multiple versions of non_max_suppression: v2, v3, v4 and v5.
# gen_image_ops.non_max_suppression_v2:
for input_dtype in [np.float16, np.float32]:
for threshold_dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=input_dtype)
scores = constant_op.constant(scores_np, dtype=input_dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(
iou_threshold_np, dtype=threshold_dtype)
selected_indices = gen_image_ops.non_max_suppression_v2(
boxes, scores, max_output_size, iou_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v3
for input_dtype in [np.float16, np.float32]:
for threshold_dtype in [np.float16, np.float32]:
# XLA currently requires dtypes to be equal.
if input_dtype == threshold_dtype or not test_util.is_xla_enabled():
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=input_dtype)
scores = constant_op.constant(scores_np, dtype=input_dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(
iou_threshold_np, dtype=threshold_dtype)
score_threshold = constant_op.constant(
score_threshold_np, dtype=threshold_dtype)
selected_indices = gen_image_ops.non_max_suppression_v3(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v4.
for input_dtype in [np.float16, np.float32]:
for threshold_dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=input_dtype)
scores = constant_op.constant(scores_np, dtype=input_dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(
iou_threshold_np, dtype=threshold_dtype)
score_threshold = constant_op.constant(
score_threshold_np, dtype=threshold_dtype)
selected_indices, _ = gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v5.
soft_nms_sigma_np = float(0.0)
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)
selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
def testZeroIOUThreshold(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [1., 1., 1., 1., 1., 1.]
max_output_size_np = 3
iou_threshold_np = 0.0
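# With an IOU threshold of 0 any overlap at all suppresses a box, so only one
# box per overlapping cluster survives.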
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices, [0, 3, 5])
class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersWithSoftNMS(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 6
iou_threshold_np = 0.5
score_threshold_np = 0.0
soft_nms_sigma_np = 0.5
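# With soft_nms_sigma > 0 overlapping boxes are not discarded; their scores
# are decayed instead, so all six indices come back in decreasing score order.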
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np)
selected_indices, selected_scores = \
image_ops.non_max_suppression_with_scores(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold,
soft_nms_sigma)
selected_indices, selected_scores = self.evaluate(
[selected_indices, selected_scores])
self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])
self.assertAllClose(selected_scores,
[0.95, 0.9, 0.384, 0.3, 0.256, 0.197],
rtol=1e-2, atol=1e-2)
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold):
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
yp, nvp = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
y, n = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(yp.shape.is_fully_defined(), True)
self.assertEqual(y.shape.is_fully_defined(), False)
return yp, nvp, y, n
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
selected_indices_padded, num_valid_padded, selected_indices, num_valid = \
func(boxes_np, scores_np, max_output_size_np, iou_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(self.evaluate(num_valid_padded), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(self.evaluate(num_valid), 3)
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold, score_threshold):
boxes = constant_op.constant(boxes)
scores = constant_op.constant(scores)
max_output_size = constant_op.constant(max_output_size)
iou_threshold = constant_op.constant(iou_threshold)
score_threshold = constant_op.constant(score_threshold)
y, nv = image_ops.non_max_suppression_padded(
boxes, scores, max_output_size, iou_threshold, score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(y.shape.is_fully_defined(), False)
return y, nv
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
selected_indices, num_valid = func(boxes_np, scores_np,
max_output_size_np, iou_threshold_np,
score_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(self.evaluate(num_valid), 3)
def testInvalidDtype(self):
boxes_np = [[4.0, 6.0, 3.0, 6.0],
[2.0, 1.0, 5.0, 4.0],
[9.0, 0.0, 9.0, 9.0]]
scores = [5.0, 6.0, 5.0]
max_output_size = 2**31
with self.assertRaisesRegex(
(TypeError, ValueError), "type int64 that does not match type int32"):
boxes = constant_op.constant(boxes_np)
image_ops.non_max_suppression_padded(boxes, scores, max_output_size)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
def testSelectOneFromThree(self):
overlaps_np = [
[1.0, 0.7, 0.2],
[0.7, 1.0, 0.0],
[0.2, 0.0, 1.0],
]
scores_np = [0.7, 0.9, 0.1]
max_output_size_np = 3
overlaps = constant_op.constant(overlaps_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
overlap_threshold = 0.6
score_threshold = 0.4
selected_indices = image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices, [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
"""Tests utility function used by ssim() and psnr()."""
def testWrongDims(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
img = array_ops.placeholder(dtype=dtypes.float32)
img_np = np.array((2, 2))
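# A rank-1 input cannot satisfy the at-least-3-D image requirement, so the
# generated check op fails at run time.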
with self.cached_session() as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img: img_np})
def testShapeMismatch(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
img1 = array_ops.placeholder(dtype=dtypes.float32)
img2 = array_ops.placeholder(dtype=dtypes.float32)
img1_np = np.array([1, 2, 2, 1])
img2_np = np.array([1, 3, 3, 1])
with self.cached_session() as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(
img1, img2)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img1: img1_np, img2: img2_np})
class PSNRTest(test_util.TensorFlowTestCase):
"""Tests for PSNR."""
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/psnr/testdata", filename))
im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
q20 = self._LoadTestImage(sess, "cat_q20.jpg")
q72 = self._LoadTestImage(sess, "cat_q72.jpg")
q95 = self._LoadTestImage(sess, "cat_q95.jpg")
return q20, q72, q95
def _PSNR_NumPy(self, orig, target, max_value):
"""Numpy implementation of PSNR."""
mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
return 20 * np.log10(max_value) - 10 * np.log10(mse)
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testPSNRSingleImage(self):
image1 = self._RandomImage((8, 8, 1), 1)
image2 = self._RandomImage((8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session():
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testPSNRMultiImage(self):
image1 = self._RandomImage((10, 8, 8, 1), 1)
image2 = self._RandomImage((10, 8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session():
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testGoldenPSNR(self):
q20, q72, q95 = self._LoadTestImages()
# Verify NumPy implementation first.
# Golden values are generated using GNU Octave's psnr() function.
psnr1 = self._PSNR_NumPy(q20, q72, 1)
self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
psnr2 = self._PSNR_NumPy(q20, q95, 1)
self.assertNear(29.994, psnr2, 0.001)
psnr3 = self._PSNR_NumPy(q72, q95, 1)
self.assertNear(35.302, psnr3, 0.001)
# Test TensorFlow implementation.
with self.cached_session():
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
tf_psnr1 = self.evaluate(image_ops.psnr(tf_q20, tf_q72, 1, "psnr1"))
tf_psnr2 = self.evaluate(image_ops.psnr(tf_q20, tf_q95, 1, "psnr2"))
tf_psnr3 = self.evaluate(image_ops.psnr(tf_q72, tf_q95, 1, "psnr3"))
self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
def testInfinity(self):
q20, _, _ = self._LoadTestImages()
psnr = self._PSNR_NumPy(q20, q20, 1)
with self.cached_session():
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_q20, tf_q20, 1, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testInt(self):
img1 = self._RandomImage((10, 8, 8, 1), 255)
img2 = self._RandomImage((10, 8, 8, 1), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
psnr_uint8 = image_ops.psnr(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
psnr_float32 = image_ops.psnr(img1, img2, 1.0)
with self.cached_session():
self.assertAllClose(
self.evaluate(psnr_uint8), self.evaluate(psnr_float32), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
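# The pairwise score matrix is symmetric, so only the upper triangle
# (including the all-ones diagonal of self-comparisons) needs checking.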
def ssim_func(x):
return image_ops.ssim(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBatchNumpyInputs(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
with self.cached_session():
img1 = self.evaluate(constant_op.constant(img1))
img2 = self.evaluate(constant_op.constant(img2))
ssim = image_ops.ssim(
img1,
img2,
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertLess(self.evaluate(ssim), 0)
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
def testWithIndexMap(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
ssim_locals = image_ops.ssim(
img1,
img2,
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03,
return_index_map=True)
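# With 16x16 inputs and an 11x11 filter there are 16 - 11 + 1 = 6 valid
# positions per spatial dimension, hence the (1, 6, 6) index map.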
self.assertEqual(ssim_locals.shape, (1, 6, 6))
ssim_global = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
axes = constant_op.constant([-2, -1], dtype=dtypes.int32)
self.assertAllClose(ssim_global, math_ops.reduce_mean(ssim_locals, axes))
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim_multiscale(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
@def_function.function
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testUnweightedIsDifferentiableEager(self):
if not context.executing_eagerly():
self.skipTest("Eager mode only")
img = self._LoadTestImages()
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
If any of the values is negative, so that the geometric mean is not
well-defined, then treat the MS-SSIM score as zero.
"""
with self.cached_session() as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [
image_ops.ssim_multiscale(
x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
for x, y in itertools.combinations(images, 2)
]
msssim = self.evaluate(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
def testNumpyInput(self):
"""Test case for GitHub issue 28241."""
image = np.random.random([512, 512, 1])
score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
with self.cached_session():
_ = self.evaluate(score_tensor)
class ImageGradientsTest(test_util.TensorFlowTestCase):
def testImageGradients(self):
shape = [1, 2, 4, 1]
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
img = array_ops.reshape(img, shape)
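# dy[i] = img[i+1] - img[i] with a zero last row; dx[j] = img[:, j+1] - img[:, j]
# with a zero last column.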
expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)
dy, dx = image_ops.image_gradients(img)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsMultiChannelBatch(self):
batch = [[[[1, 2], [2, 5], [3, 3]],
[[8, 4], [5, 1], [9, 8]]],
[[[5, 3], [7, 9], [1, 6]],
[[1, 2], [6, 3], [6, 3]]]]
expected_dy = [[[[7, 2], [3, -4], [6, 5]],
[[0, 0], [0, 0], [0, 0]]],
[[[-4, -1], [-1, -6], [5, -3]],
[[0, 0], [0, 0], [0, 0]]]]
expected_dx = [[[[1, 3], [1, -2], [0, 0]],
[[-3, -3], [4, 7], [0, 0]]],
[[[2, 6], [-6, -3], [0, 0]],
[[5, 1], [0, 0], [0, 0]]]]
batch = constant_op.constant(batch)
assert batch.get_shape().as_list() == [2, 2, 3, 2]
dy, dx = image_ops.image_gradients(batch)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsBadShape(self):
# [2 x 4] image but missing batch and depth dimensions.
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
with self.assertRaises(ValueError):
image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
def disabled_testSobelEdges1x2x3x1(self):
img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
dtype=dtypes.float32, shape=[1, 2, 3, 1])
expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
[[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
sobel = image_ops.sobel_edges(img)
with self.cached_session():
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected, actual_sobel)
def testSobelEdges5x3x4x2(self):
batch_size = 5
plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
[1, 3, 4, 1])
two_channel = np.concatenate([plane, plane], axis=3)
batch = np.concatenate([two_channel] * batch_size, axis=0)
img = constant_op.constant(batch, dtype=dtypes.float32,
shape=[batch_size, 3, 4, 2])
expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
[[6, 0], [0, 6], [-6, 10], [-6, 0]],
[[0, 0], [0, 0], [0, 10], [0, 0]]],
[1, 3, 4, 1, 2])
expected_two_channel = np.concatenate(
[expected_plane, expected_plane], axis=3)
expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
sobel = image_ops.sobel_edges(img)
with self.cached_session():
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected_batch, actual_sobel)
@test_util.run_all_in_graph_and_eager_modes
class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
_FORWARD_COMPATIBILITY_HORIZONS = [
(2020, 1, 1),
(2020, 7, 14),
(2525, 1, 1), # future behavior
]
def testBmpChannels(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with test_util.use_gpu():
base = "tensorflow/core/lib/bmp/testdata"
# `rgba_transparent.bmp` has 4 channels with transparent pixels.
# Test consistency between `decode_image` and `decode_bmp` functions.
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
image0 = image_ops.decode_image(bmp0, channels=4)
image1 = image_ops.decode_bmp(bmp0, channels=4)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
# Test that 3 channels is returned with user request of `channels=3`
# even though image has 4 channels.
# Note that this operation simply drops 4th channel information. This
# is the same behavior as `decode_png`.
# e.g. pixel values [25, 25, 25, 100] becomes [25, 25, 25].
bmp1 = io_ops.read_file(os.path.join(base, "rgb_small.bmp"))
image2 = image_ops.decode_bmp(bmp0, channels=3)
image3 = image_ops.decode_bmp(bmp1)
image2, image3 = self.evaluate([image2, image3])
self.assertAllEqual(image2, image3)
# Test that 4 channels is returned with user request of `channels=4`
# even though image has 3 channels. Alpha channel should be set to
# UINT8_MAX.
bmp3 = io_ops.read_file(os.path.join(base, "rgb_small_255.bmp"))
bmp4 = io_ops.read_file(os.path.join(base, "rgba_small_255.bmp"))
image4 = image_ops.decode_bmp(bmp3, channels=4)
image5 = image_ops.decode_bmp(bmp4)
image4, image5 = self.evaluate([image4, image5])
self.assertAllEqual(image4, image5)
# Test that 3 channels is returned with user request of `channels=3`
# even though image has 1 channel (grayscale).
bmp6 = io_ops.read_file(os.path.join(base, "grayscale_small.bmp"))
bmp7 = io_ops.read_file(
os.path.join(base, "grayscale_small_3channels.bmp"))
image6 = image_ops.decode_bmp(bmp6, channels=3)
image7 = image_ops.decode_bmp(bmp7)
image6, image7 = self.evaluate([image6, image7])
self.assertAllEqual(image6, image7)
# Test that 4 channels is returned with user request of `channels=4`
# even though image has 1 channel (grayscale). Alpha channel should be
# set to UINT8_MAX.
bmp9 = io_ops.read_file(
os.path.join(base, "grayscale_small_4channels.bmp"))
image8 = image_ops.decode_bmp(bmp6, channels=4)
image9 = image_ops.decode_bmp(bmp9)
image8, image9 = self.evaluate([image8, image9])
self.assertAllEqual(image8, image9)
def testJpegUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
# NumPy conversions should happen before
x = np.random.randint(256, size=(4, 4, 3), dtype=np.uint16)
x_str = image_ops_impl.encode_png(x)
x_dec = image_ops_impl.decode_image(
x_str, channels=3, dtype=dtypes.uint16)
self.assertAllEqual(x, x_dec)
def testGifUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testExpandAnimations(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
# Test `expand_animations=False` case.
image0 = image_ops.decode_image(
gif0, dtype=dtypes.float32, expand_animations=False)
# image_ops.decode_png() handles GIFs and returns 3D tensors
animation = image_ops.decode_gif(gif0)
first_frame = array_ops.gather(animation, 0)
image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertLen(image0.shape, 3)
self.assertAllEqual(list(image0.shape), [40, 20, 3])
self.assertAllEqual(image0, image1)
# Test `expand_animations=True` case.
image2 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image3 = image_ops.convert_image_dtype(animation, dtypes.float32)
image2, image3 = self.evaluate([image2, image3])
self.assertLen(image2.shape, 4)
self.assertAllEqual(list(image2.shape), [12, 40, 20, 3])
self.assertAllEqual(image2, image3)
def testImageCropAndResize(self):
if test_util.is_gpu_available():
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
else:
message = "Boxes contains at least one element that is not finite"
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
message):
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
def testImageCropAndResizeWithInvalidInput(self):
with self.session():
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
op = image_ops_impl.crop_and_resize_v2(
image=np.ones((1, 1, 1, 1)),
boxes=np.ones((11, 4)),
box_indices=np.ones((11)),
crop_size=[2065374891, 1145309325])
self.evaluate(op)
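  # --- Illustrative sketch (not part of the original test) ---------------
  # The two crop-and-resize tests above feed the op non-finite box
  # coordinates and an absurdly large crop size. Callers can reject both
  # before reaching the kernel, e.g. (names such as MAX_CROP_DIM are
  # illustrative only):
  #
  #   boxes = np.asarray(boxes, dtype=np.float64)
  #   if not np.all(np.isfinite(boxes)):
  #     raise ValueError("boxes must contain only finite values")
  #   if any(s <= 0 or s > MAX_CROP_DIM for s in crop_size):
  #     raise ValueError("crop_size out of range")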
@parameterized.named_parameters(
("_jpeg", "JPEG", "jpeg_merge_test1.jpg"),
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
)
def testWrongOpBmp(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = "Trying to decode " + img_format + " format using DecodeBmp op"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_bmp(img_bytes)
self.evaluate(img)
@parameterized.named_parameters(
("_jpeg", image_ops.decode_jpeg, "DecodeJpeg"),
("_png", image_ops.decode_png, "DecodePng"),
("_gif", image_ops.decode_gif, "DecodeGif"),
)
def testWrongOp(self, decode_op, op_used):
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
err_msg = ("Trying to decode BMP format using a wrong op. Use `decode_bmp` "
"or `decode_image` instead. Op used: ") + op_used
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img = decode_op(bmp0)
self.evaluate(img)
@parameterized.named_parameters(
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
("_bmp", "BMP", "rgba_small.bmp"),
)
def testWrongOpJpeg(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = ("DecodeAndCropJpeg operation can run on JPEG only, but "
"detected ") + img_format
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_and_crop_jpeg(img_bytes, [1, 1, 2, 2])
self.evaluate(img)
def testGifFramesWithDiffSize(self):
"""Test decoding an animated GIF.
This test verifies that `decode_image` op can decode animated GIFs whose
first frame does not fill the canvas. The unoccupied areas should be filled
with zeros (black).
`squares.gif` is animated with two images of different sizes. It
alternates between a smaller image of size 10 x 10 and a larger image of
size 16 x 16. Because it starts animating with the smaller image, the first
frame does not fill the canvas. (Canvas size is equal to max frame width x
max frame height.)
`red_black.gif` has just a single image in a GIF format. It is the same
image as the smaller image (size 10 x 10) of the two images in
`squares.gif`. The only difference is that its background (canvas - smaller
image) is pre-filled with zeros (black); it is the groundtruth.
"""
base = "tensorflow/core/lib/gif/testdata"
gif_bytes0 = io_ops.read_file(os.path.join(base, "squares.gif"))
image0 = image_ops.decode_image(gif_bytes0, dtype=dtypes.float32,
expand_animations=False)
gif_bytes1 = io_ops.read_file(os.path.join(base, "red_black.gif"))
image1 = image_ops.decode_image(gif_bytes1, dtype=dtypes.float32)
image1_0 = array_ops.gather(image1, 0)
image0, image1_0 = self.evaluate([image0, image1_0])
self.assertAllEqual(image0, image1_0)
if __name__ == "__main__":
googletest.main()
|
GHSA-368v-7v32-52fx
|
tests/test_rencode.py
|
@@ -401,6 +401,11 @@ def test_version_exposed(self):
"version number does not match",
)
+ def test_invalid_typecode(self):
+ s = b";\x2f\x7f"
+ with self.assertRaises(ValueError):
+ rencode.loads(s)
+
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
#
# test_rencode.py
#
# Copyright (C) 2010 Andrew Resch <[email protected]>
#
# rencode is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# rencode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rencode. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
import sys
import unittest
from rencode import _rencode as rencode
from rencode import rencode_orig
# Hack to deal with python 2 and 3 differences with unicode literals.
if sys.version < "3":
import codecs
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
unicode = str
def u(x):
return x
class TestRencode(unittest.TestCase):
def test_encode_fixed_pos_int(self):
self.assertEqual(rencode.dumps(1), rencode_orig.dumps(1))
self.assertEqual(rencode.dumps(40), rencode_orig.dumps(40))
def test_encode_fixed_neg_int(self):
self.assertEqual(rencode.dumps(-10), rencode_orig.dumps(-10))
self.assertEqual(rencode.dumps(-29), rencode_orig.dumps(-29))
def test_encode_int_char_size(self):
self.assertEqual(rencode.dumps(100), rencode_orig.dumps(100))
self.assertEqual(rencode.dumps(-100), rencode_orig.dumps(-100))
def test_encode_int_short_size(self):
self.assertEqual(rencode.dumps(27123), rencode_orig.dumps(27123))
self.assertEqual(rencode.dumps(-27123), rencode_orig.dumps(-27123))
def test_encode_int_int_size(self):
self.assertEqual(rencode.dumps(7483648), rencode_orig.dumps(7483648))
self.assertEqual(rencode.dumps(-7483648), rencode_orig.dumps(-7483648))
def test_encode_int_long_long_size(self):
self.assertEqual(
rencode.dumps(8223372036854775808), rencode_orig.dumps(8223372036854775808)
)
self.assertEqual(
rencode.dumps(-8223372036854775808),
rencode_orig.dumps(-8223372036854775808),
)
def test_encode_int_big_number(self):
n = int("9" * 62)
self.assertEqual(rencode.dumps(n), rencode_orig.dumps(n))
self.assertRaises(ValueError, rencode.dumps, int("9" * 65))
def test_encode_float_32bit(self):
self.assertEqual(rencode.dumps(1234.56), rencode_orig.dumps(1234.56))
def test_encode_float_64bit(self):
self.assertEqual(rencode.dumps(1234.56, 64), rencode_orig.dumps(1234.56, 64))
def test_encode_float_invalid_size(self):
self.assertRaises(ValueError, rencode.dumps, 1234.56, 36)
def test_encode_fixed_str(self):
self.assertEqual(rencode.dumps(b"foobarbaz"), rencode_orig.dumps(b"foobarbaz"))
def test_encode_str(self):
self.assertEqual(rencode.dumps(b"f" * 255), rencode_orig.dumps(b"f" * 255))
self.assertEqual(rencode.dumps(b"\0"), rencode_orig.dumps(b"\0"))
def test_encode_unicode(self):
self.assertEqual(rencode.dumps(u("fööbar")), rencode_orig.dumps(u("fööbar")))
def test_encode_none(self):
self.assertEqual(rencode.dumps(None), rencode_orig.dumps(None))
def test_encode_bool(self):
self.assertEqual(rencode.dumps(True), rencode_orig.dumps(True))
self.assertEqual(rencode.dumps(False), rencode_orig.dumps(False))
def test_encode_fixed_list(self):
l = [100, -234.01, b"foobar", u("bäz")] * 4
self.assertEqual(rencode.dumps(l), rencode_orig.dumps(l))
def test_encode_list(self):
l = [100, -234.01, b"foobar", u("bäz")] * 80
self.assertEqual(rencode.dumps(l), rencode_orig.dumps(l))
def test_encode_fixed_dict(self):
s = b"abcdefghijk"
d = dict(zip(s, [1234] * len(s)))
self.assertEqual(rencode.dumps(d), rencode_orig.dumps(d))
def test_encode_dict(self):
s = b"abcdefghijklmnopqrstuvwxyz1234567890"
d = dict(zip(s, [1234] * len(s)))
self.assertEqual(rencode.dumps(d), rencode_orig.dumps(d))
def test_decode_fixed_pos_int(self):
self.assertEqual(rencode.loads(rencode.dumps(10)), 10)
def test_decode_fixed_neg_int(self):
self.assertEqual(rencode.loads(rencode.dumps(-10)), -10)
def test_decode_char(self):
self.assertEqual(rencode.loads(rencode.dumps(100)), 100)
self.assertEqual(rencode.loads(rencode.dumps(-100)), -100)
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([62])))
def test_decode_short(self):
self.assertEqual(rencode.loads(rencode.dumps(27123)), 27123)
self.assertEqual(rencode.loads(rencode.dumps(-27123)), -27123)
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([63])))
def test_decode_int(self):
self.assertEqual(rencode.loads(rencode.dumps(7483648)), 7483648)
self.assertEqual(rencode.loads(rencode.dumps(-7483648)), -7483648)
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([64])))
def test_decode_long_long(self):
self.assertEqual(
rencode.loads(rencode.dumps(8223372036854775808)), 8223372036854775808
)
self.assertEqual(
rencode.loads(rencode.dumps(-8223372036854775808)), -8223372036854775808
)
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([65])))
def test_decode_int_big_number(self):
n = int(b"9" * 62)
toobig = "={x}\x7f".format(x="9" * 65).encode()
self.assertEqual(rencode.loads(rencode.dumps(n)), n)
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([61])))
self.assertRaises(ValueError, rencode.loads, toobig)
def test_decode_float_32bit(self):
f = rencode.dumps(1234.56)
self.assertEqual(rencode.loads(f), rencode_orig.loads(f))
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([66])))
def test_decode_float_64bit(self):
f = rencode.dumps(1234.56, 64)
self.assertEqual(rencode.loads(f), rencode_orig.loads(f))
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([44])))
def test_decode_fixed_str(self):
self.assertEqual(rencode.loads(rencode.dumps(b"foobarbaz")), b"foobarbaz")
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([130])))
def test_decode_str(self):
self.assertEqual(rencode.loads(rencode.dumps(b"f" * 255)), b"f" * 255)
self.assertRaises(IndexError, rencode.loads, b"50")
def test_decode_unicode(self):
self.assertEqual(
rencode.loads(rencode.dumps(u("fööbar"))), u("fööbar").encode("utf8")
)
def test_decode_none(self):
self.assertEqual(rencode.loads(rencode.dumps(None)), None)
def test_decode_bool(self):
self.assertEqual(rencode.loads(rencode.dumps(True)), True)
self.assertEqual(rencode.loads(rencode.dumps(False)), False)
def test_decode_fixed_list(self):
l = [100, False, b"foobar", u("bäz").encode("utf8")] * 4
self.assertEqual(rencode.loads(rencode.dumps(l)), tuple(l))
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([194])))
def test_decode_list(self):
l = [100, False, b"foobar", u("bäz").encode("utf8")] * 80
self.assertEqual(rencode.loads(rencode.dumps(l)), tuple(l))
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([59])))
def test_decode_fixed_dict(self):
s = b"abcdefghijk"
d = dict(zip(s, [1234] * len(s)))
self.assertEqual(rencode.loads(rencode.dumps(d)), d)
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([104])))
def test_decode_dict(self):
s = b"abcdefghijklmnopqrstuvwxyz1234567890"
d = dict(zip(s, [b"foo" * 120] * len(s)))
d2 = {b"foo": d, b"bar": d, b"baz": d}
self.assertEqual(rencode.loads(rencode.dumps(d2)), d2)
self.assertRaises(IndexError, rencode.loads, bytes(bytearray([60])))
def test_decode_str_bytes(self):
b = [202, 132, 100, 114, 97, 119, 1, 0, 0, 63, 1, 242, 63]
d = bytes(bytearray(b))
self.assertEqual(rencode.loads(rencode.dumps(d)), d)
def test_decode_str_nullbytes(self):
b = (
202,
132,
100,
114,
97,
119,
1,
0,
0,
63,
1,
242,
63,
1,
60,
132,
120,
50,
54,
52,
49,
51,
48,
58,
0,
0,
0,
1,
65,
154,
35,
215,
48,
204,
4,
35,
242,
3,
122,
218,
67,
192,
127,
40,
241,
127,
2,
86,
240,
63,
135,
177,
23,
119,
63,
31,
226,
248,
19,
13,
192,
111,
74,
126,
2,
15,
240,
31,
239,
48,
85,
238,
159,
155,
197,
241,
23,
119,
63,
2,
23,
245,
63,
24,
240,
86,
36,
176,
15,
187,
185,
248,
242,
255,
0,
126,
123,
141,
206,
60,
188,
1,
27,
254,
141,
169,
132,
93,
220,
252,
121,
184,
8,
31,
224,
63,
244,
226,
75,
224,
119,
135,
229,
248,
3,
243,
248,
220,
227,
203,
193,
3,
224,
127,
47,
134,
59,
5,
99,
249,
254,
35,
196,
127,
17,
252,
71,
136,
254,
35,
196,
112,
4,
177,
3,
63,
5,
220,
)
d = bytes(bytearray(b))
self.assertEqual(rencode.loads(rencode.dumps(d)), d)
def test_decode_utf8(self):
s = b"foobarbaz"
# no assertIsInstance with python2.6
d = rencode.loads(rencode.dumps(s), decode_utf8=True)
if not isinstance(d, unicode):
self.fail("%s is not an instance of %r" % (repr(d), unicode))
s = rencode.dumps(b"\x56\xe4foo\xc3")
self.assertRaises(UnicodeDecodeError, rencode.loads, s, decode_utf8=True)
def test_version_exposed(self):
assert rencode.__version__
assert rencode_orig.__version__
self.assertEqual(
rencode.__version__[1:],
rencode_orig.__version__[1:],
"version number does not match",
)
if __name__ == "__main__":
unittest.main()
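# --- Illustrative sketch (not part of the original test module) -------------
# The patch above adds test_invalid_typecode: b";\x2f\x7f" is a list whose
# element starts with the byte 0x2f, a typecode the decoder does not
# recognize. The safe pattern that test exercises is a decoder dispatch that
# fails loudly on unknown typecodes instead of indexing past its handler
# table. The names below are illustrative only, not the real rencode
# internals:
def _decode_one_sketch(data, pos, dispatch):
    """Return (value, new_pos); raise ValueError on an unknown typecode."""
    typecode = data[pos]
    try:
        handler = dispatch[typecode]
    except KeyError:
        raise ValueError(
            "unknown typecode 0x%02x at offset %d" % (typecode, pos)
        )
    return handler(data, pos + 1)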
|
PYSEC-2021-345
|
tensorflow/python/ops/bincount_ops_test.py
|
@@ -831,6 +831,25 @@ def test_ragged_input_different_shape_fails(self):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
+class RawOpsHeapOobTest(test.TestCase, parameterized.TestCase):
+
+ @test_util.run_v1_only("Test security error")
+ def testSparseCountSparseOutputBadIndicesShapeTooSmall(self):
+ indices = [1]
+ values = [[1]]
+ weights = []
+ dense_shape = [10]
+ with self.assertRaisesRegex(ValueError,
+ "Shape must be rank 2 but is rank 1 for"):
+ self.evaluate(
+ gen_count_ops.SparseCountSparseOutput(
+ indices=indices,
+ values=values,
+ dense_shape=dense_shape,
+ weights=weights,
+ binary_output=True))
+
+
@test_util.run_all_in_graph_and_eager_modes
@test_util.disable_tfrt
class RawOpsTest(test.TestCase, parameterized.TestCase):
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bincount ops."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import bincount_ops
from tensorflow.python.ops import gen_count_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
class TestSparseCount(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "_no_maxlength",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [2, 6]
}, {
"testcase_name": "_maxlength",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [1, 1, 1, 1, 2],
"expected_shape": [2, 7]
}, {
"testcase_name": "_minlength",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [2, 9]
}, {
"testcase_name": "_minlength_larger_values",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [2, 8]
}, {
"testcase_name": "_no_maxlength_binary",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [2, 6],
"binary_output": True,
}, {
"testcase_name": "_maxlength_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [2, 7],
"binary_output": True,
}, {
"testcase_name": "_minlength_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [2, 9],
"binary_output": True,
}, {
"testcase_name": "_minlength_larger_values_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [2, 8],
"binary_output": True,
}, {
"testcase_name": "_no_maxlength_weights",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [2, 1, 0.5, 9, 3],
"expected_shape": [2, 6],
"weights": [[0.5, 1, 2], [3, 4, 5]]
}, {
"testcase_name": "_maxlength_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [2, 1, 0.5, 3, 9],
"expected_shape": [2, 7],
"weights": [[0.5, 1, 2, 11], [7, 3, 4, 5]]
}, {
"testcase_name": "_minlength_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [2, 1, 0.5, 3, 5, 13, 4],
"expected_shape": [2, 9],
"weights": [[0.5, 1, 2, 3], [4, 5, 6, 7]]
}, {
"testcase_name": "_minlength_larger_values_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [2, 1, 0.5, 3, 5, 13, 4],
"expected_shape": [2, 8],
"weights": [[0.5, 1, 2, 3], [4, 5, 6, 7]]
}, {
"testcase_name": "_1d",
"x": np.array([3, 2, 1, 1], dtype=np.int32),
"expected_indices": [[1], [2], [3]],
"expected_values": [2, 1, 1],
"expected_shape": [4]
}, {
"testcase_name": "_all_axes",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[1], [2], [3], [4], [5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [6],
"axis": None
})
def test_dense_input(self,
x,
expected_indices,
expected_values,
expected_shape,
minlength=None,
maxlength=None,
binary_output=False,
weights=None,
axis=-1):
y = bincount_ops.sparse_bincount(
x,
weights=weights,
minlength=minlength,
maxlength=maxlength,
binary_output=binary_output,
axis=axis)
self.assertAllEqual(expected_indices, y.indices)
self.assertAllEqual(expected_values, y.values)
self.assertAllEqual(expected_shape, y.dense_shape)
@parameterized.named_parameters(
{
"testcase_name":
"_no_maxlength",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [3, 6],
},
{
"testcase_name":
"_maxlength",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [3, 7],
"maxlength":
7,
},
{
"testcase_name":
"_minlength",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [3, 9],
"minlength":
9,
},
{
"testcase_name":
"_minlength_larger_values",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [3, 8],
"minlength":
3,
},
{
"testcase_name":
"_no_maxlength_binary",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1],
"expected_shape": [3, 6],
"binary_output":
True,
},
{
"testcase_name":
"_maxlength_binary",
"x":
np.array([[3, 0, 1, 0], [0, 0, 7, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1],
"expected_shape": [3, 7],
"maxlength":
7,
"binary_output":
True,
},
{
"testcase_name":
"_minlength_binary",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [3, 9],
"minlength":
9,
"binary_output":
True,
},
{
"testcase_name":
"_minlength_larger_values_binary",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [3, 8],
"minlength":
3,
"binary_output":
True,
},
{
"testcase_name":
"_no_maxlength_weights",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [2, 6, 7, 10],
"expected_shape": [3, 6],
"weights":
np.array([[6, 0, 2, 0], [0, 0, 0, 0], [10, 0, 3.5, 3.5]]),
},
{
"testcase_name":
"_maxlength_weights",
"x":
np.array([[3, 0, 1, 0], [0, 0, 7, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [2, 6, 7, 10],
"expected_shape": [3, 7],
"maxlength":
7,
"weights":
np.array([[6, 0, 2, 0], [0, 0, 14, 0], [10, 0, 3.5, 3.5]]),
},
{
"testcase_name":
"_minlength_weights",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [2, 6, 14, 6.5, 10],
"expected_shape": [3, 9],
"minlength":
9,
"weights":
np.array([[6, 0, 2, 0], [14, 0, 0, 0], [10, 0, 3, 3.5]]),
},
{
"testcase_name":
"_minlength_larger_values_weights",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [2, 6, 14, 6.5, 10],
"expected_shape": [3, 8],
"minlength":
3,
"weights":
np.array([[6, 0, 2, 0], [14, 0, 0, 0], [10, 0, 3, 3.5]]),
},
{
"testcase_name": "_1d",
"x": np.array([3, 0, 1, 1], dtype=np.int32),
"expected_indices": [[1], [3]],
"expected_values": [2, 1],
"expected_shape": [4],
},
{
"testcase_name":
"_all_axes",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[1], [3], [4], [5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [6],
"axis":
None,
},
)
def test_sparse_input(self,
x,
expected_indices,
expected_values,
expected_shape,
maxlength=None,
minlength=None,
binary_output=False,
weights=None,
axis=-1):
x_sparse = sparse_ops.from_dense(x)
w_sparse = sparse_ops.from_dense(weights) if weights is not None else None
y = bincount_ops.sparse_bincount(
x_sparse,
weights=w_sparse,
minlength=minlength,
maxlength=maxlength,
binary_output=binary_output,
axis=axis)
self.assertAllEqual(expected_indices, y.indices)
self.assertAllEqual(expected_values, y.values)
self.assertAllEqual(expected_shape, y.dense_shape)
@parameterized.named_parameters(
{
"testcase_name": "_no_maxlength",
"x": [[], [], [3, 0, 1], [], [5, 0, 4, 4]],
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [1, 1, 1, 1, 2, 1],
"expected_shape": [5, 6],
},
{
"testcase_name": "_maxlength",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"maxlength": 7,
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [1, 1, 1, 1, 2, 1],
"expected_shape": [5, 7],
},
{
"testcase_name": "_minlength",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 9,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [5, 9],
},
{
"testcase_name": "_minlength_larger_values",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 3,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [5, 8],
},
{
"testcase_name": "_no_maxlength_binary",
"x": [[], [], [3, 0, 1], [], [5, 0, 4, 4]],
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [1, 1, 1, 1, 1, 1],
"expected_shape": [5, 6],
"binary_output": True,
},
{
"testcase_name": "_maxlength_binary",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"maxlength": 7,
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [1, 1, 1, 1, 1, 1],
"expected_shape": [5, 7],
"binary_output": True,
},
{
"testcase_name": "_minlength_binary",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 9,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [5, 9],
"binary_output": True,
},
{
"testcase_name": "_minlength_larger_values_binary",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 3,
"binary_output": True,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [5, 8],
},
{
"testcase_name": "_no_maxlength_weights",
"x": [[], [], [3, 0, 1], [], [5, 0, 4, 4]],
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [0.5, 2, 6, 0.25, 8, 10],
"expected_shape": [5, 6],
"weights": [[], [], [6, 0.5, 2], [], [10, 0.25, 5, 3]],
},
{
"testcase_name": "_maxlength_weights",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"maxlength": 7,
"expected_indices": [[2, 0], [2, 1], [2, 3], [4, 0], [4, 4], [4, 5]],
"expected_values": [0.5, 2, 6, 0.25, 8, 10],
"expected_shape": [5, 7],
"weights": [[], [], [6, 0.5, 2], [14], [10, 0.25, 5, 3]],
},
{
"testcase_name": "_minlength_weights",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 9,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [0.5, 2, 6, 14, 0.25, 8, 10],
"expected_shape": [5, 9],
"weights": [[], [], [6, 0.5, 2], [14], [10, 0.25, 5, 3]],
},
{
"testcase_name": "_minlength_larger_values_weights",
"x": [[], [], [3, 0, 1], [7], [5, 0, 4, 4]],
"minlength": 3,
"expected_indices": [[2, 0], [2, 1], [2, 3], [3, 7], [4, 0], [4, 4],
[4, 5]],
"expected_values": [0.5, 2, 6, 14, 0.25, 8, 10],
"expected_shape": [5, 8],
"weights": [[], [], [6, 0.5, 2], [14], [10, 0.25, 5, 3]],
},
{
"testcase_name": "_1d",
"x": [3, 0, 1, 1],
"expected_indices": [[0], [1], [3]],
"expected_values": [1, 2, 1],
"expected_shape": [4],
},
{
"testcase_name": "_all_axes",
"x": [[], [], [3, 0, 1], [], [5, 0, 4, 4]],
"expected_indices": [[0], [1], [3], [4], [5]],
"expected_values": [2, 1, 1, 2, 1],
"expected_shape": [6],
"axis": None,
},
)
def test_ragged_input(self,
x,
expected_indices,
expected_values,
expected_shape,
maxlength=None,
minlength=None,
binary_output=False,
weights=None,
axis=-1):
x_ragged = ragged_factory_ops.constant(x)
w = ragged_factory_ops.constant(weights) if weights is not None else None
y = bincount_ops.sparse_bincount(
x_ragged,
weights=w,
minlength=minlength,
maxlength=maxlength,
binary_output=binary_output,
axis=axis)
self.assertAllEqual(expected_indices, y.indices)
self.assertAllEqual(expected_values, y.values)
self.assertAllEqual(expected_shape, y.dense_shape)
class TestDenseBincount(test.TestCase, parameterized.TestCase):
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_all_count(self, dtype):
np.random.seed(42)
num_rows = 128
size = 1000
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_indices = np.concatenate([inp_indices, np.zeros((n_elems, 1))], axis=1)
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
sparse_inp = sparse_tensor.SparseTensor(inp_indices, inp_vals,
[num_rows, 1])
np_out = np.bincount(inp_vals, minlength=size)
self.assertAllEqual(
np_out, self.evaluate(bincount_ops.bincount(sparse_inp, axis=0)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_all_count_with_weights(self, dtype):
np.random.seed(42)
num_rows = 128
size = 1000
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_indices = np.concatenate([inp_indices, np.zeros((n_elems, 1))], axis=1)
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
sparse_inp = sparse_tensor.SparseTensor(inp_indices, inp_vals,
[num_rows, 1])
weight_vals = np.random.random((n_elems,))
sparse_weights = sparse_tensor.SparseTensor(inp_indices, weight_vals,
[num_rows, 1])
np_out = np.bincount(inp_vals, minlength=size, weights=weight_vals)
self.assertAllEqual(
np_out,
self.evaluate(bincount_ops.bincount(
sparse_inp, sparse_weights, axis=0)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_all_binary(self, dtype):
np.random.seed(42)
num_rows = 128
size = 10
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_indices = np.concatenate([inp_indices, np.zeros((n_elems, 1))], axis=1)
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
sparse_inp = sparse_tensor.SparseTensor(inp_indices, inp_vals,
[num_rows, 1])
np_out = np.ones((size,))
self.assertAllEqual(
np_out,
self.evaluate(bincount_ops.bincount(sparse_inp, binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_col_reduce_count(self, dtype):
num_rows = 128
num_cols = 27
size = 100
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate(
[np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
axis=0), (num_rows, size))
# from_dense will filter out 0s.
inp = inp + 1
# from_dense will cause OOM in GPU.
with ops.device("/CPU:0"):
inp_sparse = sparse_ops.from_dense(inp)
inp_sparse = sparse_tensor.SparseTensor(inp_sparse.indices,
inp_sparse.values - 1,
inp_sparse.dense_shape)
self.assertAllEqual(
np_out, self.evaluate(bincount_ops.bincount(arr=inp_sparse, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_input_col_reduce_binary(self, dtype):
num_rows = 128
num_cols = 27
size = 100
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate([
np.where(np.bincount(inp[j, :], minlength=size) > 0, 1, 0)
for j in range(num_rows)
],
axis=0), (num_rows, size))
# from_dense will filter out 0s.
inp = inp + 1
# from_dense will cause OOM in GPU.
with ops.device("/CPU:0"):
inp_sparse = sparse_ops.from_dense(inp)
inp_sparse = sparse_tensor.SparseTensor(inp_sparse.indices,
inp_sparse.values - 1,
inp_sparse.dense_shape)
self.assertAllEqual(
np_out,
self.evaluate(
bincount_ops.bincount(arr=inp_sparse, axis=-1, binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]],
dtype)
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 2, 1]]
# pyformat: enable
self.assertAllEqual(expected_output,
self.evaluate(bincount_ops.bincount(arr=x, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_binary(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 1]]
# pyformat: enable
self.assertAllEqual(
expected_output,
self.evaluate(
bincount_ops.bincount(arr=x, axis=-1, binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_with_weights(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
weights = ragged_factory_ops.constant([[], [], [.1, .2, .3], [],
[.2, .5, .6, .3]])
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[.2, .3, 0, .1, 0, 0],
[0, 0, 0, 0, 0, 0],
[.5, 0, 0, 0, .9, .2]]
# pyformat: enable
self.assertAllClose(
expected_output,
self.evaluate(bincount_ops.bincount(arr=x, weights=weights, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_np(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate(
[np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
self.assertAllEqual(
np_out,
self.evaluate(bincount_ops.bincount(arr=x, minlength=size, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_np_with_weights(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_weight = np.random.random((num_rows, num_cols))
np_out = np.reshape(
np.concatenate([
np.bincount(inp[j, :], weights=np_weight[j, :], minlength=size)
for j in range(num_rows)
],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
weights = ragged_tensor.RaggedTensor.from_tensor(np_weight)
self.assertAllEqual(
np_out,
self.evaluate(
bincount_ops.bincount(
arr=x, weights=weights, minlength=size, axis=-1)))
class TestSparseCountFailureModes(test.TestCase):
def test_dense_input_sparse_weights_fails(self):
x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
weights = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesRegex(ValueError, "must be a tf.Tensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_dense_input_ragged_weights_fails(self):
x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
weights = ragged_factory_ops.constant([[6, 0.5, 2], [14], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(ValueError, "must be a tf.Tensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_dense_input_wrong_shape_fails(self):
x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
weights = np.array([[3, 2], [5, 4], [4, 3]])
# Note: Eager mode and graph mode throw different errors here. Graph mode
# will fail with a ValueError from the shape checking logic, while Eager
# will fail with an InvalidArgumentError from the kernel itself.
if context.executing_eagerly():
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same shape"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
else:
with self.assertRaisesRegex(ValueError, "both shapes must be equal"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_dense_weights_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_ragged_weights_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = ragged_factory_ops.constant([[6, 0.5, 2], [14], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_wrong_indices_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = sparse_ops.from_dense(
np.array([[3, 1, 0, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same indices"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_too_many_indices_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = sparse_ops.from_dense(
np.array([[3, 1, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesIncompatibleShapesError():
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_wrong_shape_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4], [0, 0, 0, 0]],
dtype=np.int32))
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same dense shape"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_dense_weights_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_sparse_weights_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_different_shape_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = ragged_factory_ops.constant([[6, 0.5, 2], [], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same row splits"):
self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
@test_util.run_all_in_graph_and_eager_modes
@test_util.disable_tfrt
class RawOpsTest(test.TestCase, parameterized.TestCase):
def testSparseCountSparseOutputBadIndicesShape(self):
indices = [[[0], [0]], [[0], [1]], [[1], [0]], [[1], [2]]]
values = [1, 1, 1, 10]
weights = [1, 2, 4, 6]
dense_shape = [2, 3]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Input indices must be a 2-dimensional tensor"):
self.evaluate(
gen_count_ops.SparseCountSparseOutput(
indices=indices,
values=values,
dense_shape=dense_shape,
weights=weights,
binary_output=False))
def testSparseCountSparseOutputBadWeightsShape(self):
indices = [[0, 0], [0, 1], [1, 0], [1, 2]]
values = [1, 1, 1, 10]
weights = [1, 2, 4]
dense_shape = [2, 3]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Weights and values must have the same shape"):
self.evaluate(
gen_count_ops.SparseCountSparseOutput(
indices=indices,
values=values,
dense_shape=dense_shape,
weights=weights,
binary_output=False))
def testSparseCountSparseOutputBadNumberOfValues(self):
indices = [[0, 0], [0, 1], [1, 0]]
values = [1, 1, 1, 10]
weights = [1, 2, 4, 6]
dense_shape = [2, 3]
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Number of values must match first dimension of indices"):
self.evaluate(
gen_count_ops.SparseCountSparseOutput(
indices=indices,
values=values,
dense_shape=dense_shape,
weights=weights,
binary_output=False))
def testRaggedCountSparseOutput(self):
splits = [0, 4, 7]
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6, 7]
output_indices, output_values, output_shape = self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits, values=values, weights=weights, binary_output=False))
self.assertAllEqual([[0, 1], [0, 2], [1, 2], [1, 5], [1, 10]],
output_indices)
self.assertAllEqual([7, 3, 5, 7, 6], output_values)
self.assertAllEqual([2, 11], output_shape)
def testRaggedCountSparseOutputBadWeightsShape(self):
splits = [0, 4, 7]
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Weights and values must have the same shape"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits,
values=values,
weights=weights,
binary_output=False))
def testRaggedCountSparseOutputEmptySplits(self):
splits = []
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6, 7]
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Must provide at least 2 elements for the splits argument"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits,
values=values,
weights=weights,
binary_output=False))
def testRaggedCountSparseOutputBadSplitsStart(self):
splits = [1, 7]
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6, 7]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Splits must start with 0"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits,
values=values,
weights=weights,
binary_output=False))
def testRaggedCountSparseOutputBadSplitsEnd(self):
splits = [0, 5]
values = [1, 1, 2, 1, 2, 10, 5]
weights = [1, 2, 3, 4, 5, 6, 7]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Splits must end with the number of values"):
self.evaluate(
gen_count_ops.RaggedCountSparseOutput(
splits=splits,
values=values,
weights=weights,
binary_output=False))
if __name__ == "__main__":
test.main()
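# --- Illustrative sketch (not part of the original test module) -------------
# The raw-op failure tests above all exercise the same input invariants for
# SparseCountSparseOutput: indices must be a rank-2 [N, rank] tensor, values
# must supply N entries, and weights must match values. A minimal host-side
# restatement of those checks (illustrative only, not the TF kernel):
def _validate_sparse_count_inputs_sketch(indices, values, weights, dense_shape):
  indices = np.asarray(indices)
  values = np.asarray(values)
  weights = np.asarray(weights)
  if indices.ndim != 2:
    raise ValueError("Input indices must be a 2-dimensional tensor")
  if values.shape[0] != indices.shape[0]:
    raise ValueError("Number of values must match first dimension of indices")
  if weights.shape != values.shape:
    raise ValueError("Weights and values must have the same shape")
  if indices.shape[1] != len(dense_shape):
    raise ValueError("Each index must have the same rank as dense_shape")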
|
PYSEC-2021-619
|
alerta/auth/basic_ldap.py
|
@@ -27,6 +27,9 @@ def login():
except KeyError:
raise ApiError("must supply 'username' and 'password'", 401)
+ if not password:
+ raise ApiError('password not allowed to be empty', 401)
+
try:
if '\\' in login:
domain, username = login.split('\\')
|
import sys
import ldap # pylint: disable=import-error
from flask import current_app, jsonify, request
from flask_cors import cross_origin
from alerta.auth.utils import create_token, get_customers
from alerta.exceptions import ApiError
from alerta.models.permission import Permission
from alerta.models.user import User
from alerta.utils.audit import auth_audit_trail
from . import auth
@auth.route('/auth/login', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def login():
# Allow LDAP server to use a self signed certificate
if current_app.config['LDAP_ALLOW_SELF_SIGNED_CERT']:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
# Retrieve required fields from client request
try:
login = request.json.get('username', None) or request.json['email']
password = request.json['password']
except KeyError:
raise ApiError("must supply 'username' and 'password'", 401)
try:
if '\\' in login:
domain, username = login.split('\\')
email = ''
email_verified = False
else:
username, domain = login.split('@')
email = login
email_verified = True
except ValueError:
raise ApiError('expected username with domain', 401)
# Validate LDAP domain
if domain not in current_app.config['LDAP_DOMAINS']:
raise ApiError('unauthorized domain', 403)
userdn = current_app.config['LDAP_DOMAINS'][domain] % username
# Attempt LDAP AUTH
try:
trace_level = 2 if current_app.debug else 0
ldap_connection = ldap.initialize(current_app.config['LDAP_URL'], trace_level=trace_level)
ldap_connection.simple_bind_s(userdn, password)
except ldap.INVALID_CREDENTIALS:
raise ApiError('invalid username or password', 401)
except Exception as e:
raise ApiError(str(e), 500)
# Get email address from LDAP
if not email_verified:
try:
ldap_result = ldap_connection.search_s(userdn, ldap.SCOPE_SUBTREE, '(objectClass=*)', ['mail'])
email = ldap_result[0][1]['mail'][0].decode(sys.stdout.encoding)
email_verified = True
except Exception:
email = '{}@{}'.format(username, domain)
# Create user if not yet there
user = User.find_by_username(username=login)
if not user:
user = User(name=username, login=login, password='', email=email,
roles=[], text='LDAP user', email_verified=email_verified)
try:
user = user.create()
except Exception as e:
raise ApiError(str(e), 500)
# Assign customers & update last login time
groups = list()
try:
groups_filters = current_app.config.get('LDAP_DOMAINS_GROUP', {})
base_dns = current_app.config.get('LDAP_DOMAINS_BASEDN', {})
if domain in groups_filters and domain in base_dns:
resultID = ldap_connection.search(
base_dns[domain],
ldap.SCOPE_SUBTREE,
groups_filters[domain].format(username=username, email=email, userdn=userdn),
['cn']
)
resultTypes, results = ldap_connection.result(resultID)
for _dn, attributes in results:
groups.append(attributes['cn'][0].decode('utf-8'))
except ldap.LDAPError as e:
raise ApiError(str(e), 500)
# Check user is active
if user.status != 'active':
raise ApiError('User {} not active'.format(login), 403)
user.update_last_login()
scopes = Permission.lookup(login=login, roles=user.roles + groups)
customers = get_customers(login=login, groups=[user.domain] + groups)
auth_audit_trail.send(current_app._get_current_object(), event='basic-ldap-login', message='user login via LDAP',
user=login, customers=customers, scopes=scopes, roles=user.roles, groups=groups,
resource_id=user.id, type='user', request=request)
# Generate token
token = create_token(user_id=user.id, name=user.name, login=user.email, provider='ldap',
customers=customers, scopes=scopes, roles=user.roles, groups=groups,
email=user.email, email_verified=user.email_verified)
return jsonify(token=token.tokenize)
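# --- Illustrative note (not part of the original module) --------------------
# Context for the patch above: LDAP simple bind treats a request with a DN
# and an *empty* password as an unauthenticated/anonymous bind (RFC 4513,
# section 5.1.2), and many servers accept it, so simple_bind_s(userdn, '')
# would "succeed" for any known username. Rejecting blank passwords before
# the bind closes that hole; a minimal, framework-free version of the guard
# (names illustrative only):
def _require_credentials_sketch(username, password):
    if not username or not password:
        raise ValueError('username and password must be non-empty')
    return username, password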
|
PYSEC-2020-159
|
alerta/models/note.py
|
@@ -55,7 +55,7 @@ def serialize(self) -> Dict[str, Any]:
'updateTime': self.update_time,
'_links': dict(),
'customer': self.customer
- }
+ } # type: Dict[str, Any]
if self.alert:
note['_links'] = {
'alert': absolute_url('/alert/' + self.alert)
|
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4
from flask import g
from alerta.app import db
from alerta.database.base import Query
from alerta.models.enums import ChangeType, NoteType
from alerta.models.history import History
from alerta.utils.format import DateTime
from alerta.utils.response import absolute_url
JSON = Dict[str, Any]
class Note:
def __init__(self, text: str, user: str, note_type: str, **kwargs) -> None:
self.id = kwargs.get('id') or str(uuid4())
self.text = text
self.user = user
self.note_type = note_type
self.attributes = kwargs.get('attributes', None) or dict()
self.create_time = kwargs['create_time'] if 'create_time' in kwargs else datetime.utcnow()
self.update_time = kwargs.get('update_time')
self.alert = kwargs.get('alert')
self.customer = kwargs.get('customer')
@classmethod
def parse(cls, json: JSON) -> 'Note':
return Note(
id=json.get('id', None),
text=json.get('status', None),
user=json.get('status', None),
attributes=json.get('attributes', dict()),
note_type=json.get('type', None),
create_time=DateTime.parse(json['createTime']) if 'createTime' in json else None,
update_time=DateTime.parse(json['updateTime']) if 'updateTime' in json else None,
alert=json.get('related', {}).get('alert'),
customer=json.get('customer', None)
)
@property
def serialize(self) -> Dict[str, Any]:
note = {
'id': self.id,
'href': absolute_url('/note/' + self.id),
'text': self.text,
'user': self.user,
'attributes': self.attributes,
'type': self.note_type,
'createTime': self.create_time,
'updateTime': self.update_time,
'_links': dict(),
'customer': self.customer
}
if self.alert:
note['_links'] = {
'alert': absolute_url('/alert/' + self.alert)
}
return note
def __repr__(self) -> str:
return 'Note(id={!r}, text={!r}, user={!r}, type={!r}, customer={!r})'.format(
self.id, self.text, self.user, self.note_type, self.customer
)
@classmethod
def from_document(cls, doc: Dict[str, Any]) -> 'Note':
return Note(
id=doc.get('id', None) or doc.get('_id'),
text=doc.get('text', None),
user=doc.get('user', None),
attributes=doc.get('attributes', dict()),
note_type=doc.get('type', None),
create_time=doc.get('createTime'),
update_time=doc.get('updateTime'),
alert=doc.get('alert'),
customer=doc.get('customer')
)
@classmethod
def from_record(cls, rec) -> 'Note':
return Note(
id=rec.id,
text=rec.text,
user=rec.user,
attributes=dict(rec.attributes),
note_type=rec.type,
create_time=rec.create_time,
update_time=rec.update_time,
alert=rec.alert,
customer=rec.customer
)
@classmethod
def from_db(cls, r: Union[Dict, Tuple]) -> 'Note':
if isinstance(r, dict):
return cls.from_document(r)
elif isinstance(r, tuple):
return cls.from_record(r)
def create(self) -> 'Note':
return Note.from_db(db.create_note(self))
@staticmethod
def from_alert(alert, text):
note = Note(
text=text,
user=g.login,
note_type=NoteType.alert,
attributes=dict(
resource=alert.resource,
event=alert.event,
environment=alert.environment,
severity=alert.severity,
status=alert.status
),
alert=alert.id,
customer=alert.customer
)
history = History(
id=note.id,
event=alert.event,
severity=alert.severity,
status=alert.status,
value=alert.value,
text=text,
change_type=ChangeType.note,
update_time=datetime.utcnow(),
user=g.login
)
db.add_history(alert.id, history)
return note.create()
@staticmethod
def find_by_id(id: str) -> Optional['Note']:
return Note.from_db(db.get_note(id))
@staticmethod
def find_all(query: Query = None) -> List['Note']:
return [Note.from_db(note) for note in db.get_notes(query)]
def update(self, **kwargs) -> 'Note':
return Note.from_db(db.update_note(self.id, **kwargs))
def delete(self) -> bool:
return db.delete_note(self.id)
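# Illustrative sketch (editorial addition, not part of the upstream model): a
# minimal example of feeding an API-style payload through Note.parse(). The
# payload below is hypothetical; serialize() and create() are deliberately not
# called, since they require a Flask application context and a configured
# database backend.
if __name__ == '__main__':
    _sample_payload = {
        'text': 'disk usage back to normal',
        'user': 'ops@example.com',
        'type': NoteType.alert,
        'attributes': {'resource': 'web01'},
        'related': {'alert': 'hypothetical-alert-id'}
    }
    # parse() only builds the in-memory object; nothing touches the database here.
    print(repr(Note.parse(_sample_payload)))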
|
PYSEC-2020-159
|
tensorflow/python/distribute/sharded_variable_test.py
|
@@ -175,8 +175,9 @@ def func():
'scatter_update')
def test_scatter_ops_even_partition(self, op):
v = variables_lib.Variable(array_ops.zeros((30, 1)))
+ # Make sure values does not contain 0 due to testing `scatter_div`!
sparse_delta = ops.IndexedSlices(
- values=constant_op.constant([[0.], [1.], [2.], [3.], [4.]]),
+ values=constant_op.constant([[1.], [2.], [3.], [4.], [5.]]),
indices=constant_op.constant([0, 10, 12, 21, 22]))
v0 = variables_lib.Variable(array_ops.zeros((10, 1)))
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ShardedVariable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.client import session as session_lib
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
def _load_and_run(
model_dir,
inputs,
signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
"""Load a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = ops.Graph()
with graph.as_default(), session_lib.Session() as session:
meta_graph_def = loader.load(session, [tag_constants.SERVING], model_dir)
signature = meta_graph_def.signature_def[signature_key]
feed_dict = {}
for arg_name in inputs.keys():
input_tensor = session.graph.get_tensor_by_name(
signature.inputs[arg_name].name)
feed_dict[input_tensor] = inputs[arg_name]
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = session.graph.get_tensor_by_name(
output_tensor_info.name)
return session.run(output_dict, feed_dict=feed_dict)
class PartitionerTest(test.TestCase):
def test_fixed_shards_partitioner(self):
partitioner = sharded_variable.FixedShardsPartitioner(num_shards=2)
got = partitioner(tensor_shape.TensorShape([10, 3]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
def test_min_size_partitioner(self):
partitioner = sharded_variable.MinSizePartitioner(
min_shard_bytes=4, max_shards=2)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
partitioner = sharded_variable.MinSizePartitioner(
min_shard_bytes=4, max_shards=10)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [6, 1])
def test_max_size_partitioner(self):
partitioner = sharded_variable.MaxSizePartitioner(max_shard_bytes=4)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [6, 1])
partitioner = sharded_variable.MaxSizePartitioner(
max_shard_bytes=4, max_shards=2)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
partitioner = sharded_variable.MaxSizePartitioner(max_shard_bytes=1024)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [1, 1])
class ShardedVariableTest(test.TestCase, parameterized.TestCase):
def test_sharded_variable_simple(self):
v0 = variables_lib.Variable([0])
v1 = variables_lib.Variable([1])
s = sharded_variable.ShardedVariable([v0, v1], name='s')
self.assertEqual(s.variables[0], v0)
self.assertEqual(s.variables[1], v1)
self.assertEqual(s.shape.as_list(), [2])
self.assertEqual(s.dtype, v0.dtype)
self.assertEqual(s.name, 's')
def test_assign(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
ret = s.assign([[4, 4], [5, 5], [6, 6], [7, 7]])
self.assertAllEqual(self.evaluate(s.variables[0]), [[4, 4]])
self.assertAllEqual(self.evaluate(s.variables[1]), [[5, 5], [6, 6]])
self.assertAllEqual(self.evaluate(s.variables[2]), [[7, 7]])
self.assertIs(ret, s)
def test_assign_add(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
ret = s.assign_add([[1, 1], [1, 1], [2, 2], [2, 2]])
self.assertAllEqual(self.evaluate(s.variables[0]), [[1, 1]])
self.assertAllEqual(self.evaluate(s.variables[1]), [[2, 2], [4, 4]])
self.assertAllEqual(self.evaluate(s.variables[2]), [[5, 5]])
self.assertIs(ret, s)
def test_assign_sub(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
ret = s.assign_sub([[0, 0], [1, 1], [1, 1], [3, 3]])
self.assertAllEqual(self.evaluate(s.variables[0]), [[0, 0]])
self.assertAllEqual(self.evaluate(s.variables[1]), [[0, 0], [1, 1]])
self.assertAllEqual(self.evaluate(s.variables[2]), [[0, 0]])
self.assertIs(ret, s)
def test_scatter_add_uneven_partition(self):
v = variables_lib.Variable(array_ops.zeros((32, 1)))
sparse_delta = ops.IndexedSlices(
values=constant_op.constant([[0.], [1.], [2.], [3.], [4.], [5.]]),
indices=constant_op.constant([0, 10, 11, 12, 30, 31]))
v0 = variables_lib.Variable(array_ops.zeros((11, 1)))
v1 = variables_lib.Variable(array_ops.zeros((11, 1)))
v2 = variables_lib.Variable(array_ops.zeros((10, 1)))
sv = sharded_variable.ShardedVariable([v0, v1, v2])
v.scatter_add(sparse_delta)
sv.scatter_add(sparse_delta)
self.assertAllEqual(v, ops.convert_to_tensor(sv))
@def_function.function
def func():
v.scatter_add(sparse_delta)
sv.scatter_add(sparse_delta)
func()
self.assertAllEqual(v, ops.convert_to_tensor(sv))
@parameterized.parameters('scatter_add', 'scatter_div', 'scatter_max',
'scatter_min', 'scatter_mul', 'scatter_sub',
'scatter_update')
def test_scatter_ops_even_partition(self, op):
v = variables_lib.Variable(array_ops.zeros((30, 1)))
sparse_delta = ops.IndexedSlices(
values=constant_op.constant([[0.], [1.], [2.], [3.], [4.]]),
indices=constant_op.constant([0, 10, 12, 21, 22]))
v0 = variables_lib.Variable(array_ops.zeros((10, 1)))
v1 = variables_lib.Variable(array_ops.zeros((10, 1)))
v2 = variables_lib.Variable(array_ops.zeros((10, 1)))
sv = sharded_variable.ShardedVariable([v0, v1, v2])
getattr(v, op)(sparse_delta, name='scatter_v')
getattr(sv, op)(sparse_delta, name='scatter_sv')
self.assertAllEqual(v, ops.convert_to_tensor(sv))
@def_function.function
def func():
getattr(v, op)(sparse_delta, name='scatter_v')
getattr(sv, op)(sparse_delta, name='scatter_sv')
func()
self.assertAllEqual(v, ops.convert_to_tensor(sv))
def test_batch_scatter_update(self):
v = variables_lib.Variable(array_ops.zeros((32, 1)))
sparse_delta = ops.IndexedSlices(
values=constant_op.constant([[0.], [1.], [2.], [3.], [4.], [5.]]),
indices=constant_op.constant([10, 11, 12, 13, 14, 15]))
v0 = variables_lib.Variable(array_ops.zeros((11, 1)))
v1 = variables_lib.Variable(array_ops.zeros((11, 1)))
v2 = variables_lib.Variable(array_ops.zeros((10, 1)))
sv = sharded_variable.ShardedVariable([v0, v1, v2])
v.batch_scatter_update(sparse_delta)
sv.batch_scatter_update(sparse_delta)
self.assertAllEqual(v, ops.convert_to_tensor(sv))
@def_function.function
def func():
v.batch_scatter_update(sparse_delta)
sv.batch_scatter_update(sparse_delta)
func()
self.assertAllEqual(v, ops.convert_to_tensor(sv))
def test_sparse_read(self):
v = variables_lib.Variable(array_ops.zeros((30, 1)))
indices = constant_op.constant([0, 10, 12, 21, 22])
v0 = variables_lib.Variable(array_ops.zeros((10, 1)))
v1 = variables_lib.Variable(array_ops.zeros((10, 1)))
v2 = variables_lib.Variable(array_ops.zeros((10, 1)))
sv = sharded_variable.ShardedVariable([v0, v1, v2])
self.assertAllEqual(v.sparse_read(indices), sv.sparse_read(indices))
@def_function.function
def func():
return v.sparse_read(indices), sv.sparse_read(indices)
got, expect = func()
self.assertAllEqual(got, expect)
def test_control_dep_on_assign(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
@def_function.function
def func():
ret = s.assign([[4, 4], [5, 5], [6, 6], [7, 7]])
with ops.control_dependencies([ret]):
a = array_ops.ones((1, 1))
with ops.control_dependencies([control_flow_ops.group(ret)]):
b = array_ops.ones((1, 1))
return a, b
func()
def test_convert_to_tensor(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
t = ops.convert_to_tensor(s)
self.assertAllEqual(t, [[0, 0], [1, 1], [2, 2], [3, 3]])
def test_save_restore(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
s = sharded_variable.ShardedVariable(variables, name='s')
cp = util.Checkpoint(s=s)
self.assertEqual(self.evaluate(cp.s.variables[0]), [0])
cp.write(fname)
self.evaluate(cp.s.variables[0].assign([4]))
self.assertEqual(self.evaluate(cp.s.variables[0]), [4])
cp.restore(fname)
# Tests that the original weights are restored.
self.assertEqual(self.evaluate(cp.s.variables[0]), [0])
def test_save_restore_different_partitions(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
s = sharded_variable.ShardedVariable(variables, name='s')
cp = util.Checkpoint(s=s)
cp.write(fname)
variables2 = [variables_lib.Variable([0, 0, 0, 0])]
s2 = sharded_variable.ShardedVariable(variables2, name='s')
# Restore from 4 partitions into 1.
cp2 = util.Checkpoint(s=s2)
cp2.restore(fname)
self.assertAllEqual(self.evaluate(cp2.s.variables[0]), [0, 1, 2, 3])
self.evaluate(cp2.s.variables[0].assign([5, 10, 15, 20]))
cp2.write(fname)
# Restore 1 partition into 4.
cp.restore(fname)
self.assertEqual(self.evaluate(cp.s.variables[0]), [5])
self.assertEqual(self.evaluate(cp.s.variables[1]), [10])
self.assertEqual(self.evaluate(cp.s.variables[2]), [15])
self.assertEqual(self.evaluate(cp.s.variables[3]), [20])
def test_save_restore_4_to_2_partitions(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
s = sharded_variable.ShardedVariable(variables, name='s')
cp = util.Checkpoint(s=s)
cp.write(fname)
variables2 = [
variables_lib.Variable([0, 0]),
variables_lib.Variable([0, 0])
]
s2 = sharded_variable.ShardedVariable(variables2, name='s')
cp2 = util.Checkpoint(s=s2)
cp2.restore(fname)
# Assert that weights from the 4 partitions were loaded here.
self.assertLen(cp2.s.variables, 2)
self.assertAllEqual(self.evaluate(cp2.s.variables[0]), [0, 1])
self.assertAllEqual(self.evaluate(cp2.s.variables[1]), [2, 3])
def test_delayed_restore(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
model = tracking.AutoTrackable()
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
model.s = sharded_variable.ShardedVariable(variables)
cp = util.Checkpoint(model=model)
cp.write(fname)
model2 = tracking.AutoTrackable()
cp2 = util.Checkpoint(model=model2)
cp2.restore(fname)
variables2 = [
variables_lib.Variable([0]),
variables_lib.Variable([0]),
variables_lib.Variable([0]),
variables_lib.Variable([0])
]
model2.s = sharded_variable.ShardedVariable(variables2)
self.assertAllEqual(self.evaluate(model2.s.variables[0]), [0])
self.assertAllEqual(self.evaluate(model2.s.variables[1]), [1])
self.assertAllEqual(self.evaluate(model2.s.variables[2]), [2])
self.assertAllEqual(self.evaluate(model2.s.variables[3]), [3])
def test_delayed_restore_4_to_2_partitions(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
model = tracking.AutoTrackable()
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
model.s = sharded_variable.ShardedVariable(variables)
cp = util.Checkpoint(model=model)
cp.write(fname)
model2 = tracking.AutoTrackable()
cp2 = util.Checkpoint(model=model2)
cp2.restore(fname)
variables2 = [
variables_lib.Variable([0, 0]),
variables_lib.Variable([0, 0])
]
model2.s = sharded_variable.ShardedVariable(variables2)
self.assertAllEqual(self.evaluate(model2.s.variables[0]), [0, 1])
self.assertAllEqual(self.evaluate(model2.s.variables[1]), [2, 3])
def test_save_graph_def(self):
root = tracking.AutoTrackable()
v1 = variables_lib.Variable([3.])
v2 = variables_lib.Variable([2.])
root.v = sharded_variable.ShardedVariable([v1, v2])
root.train = def_function.function(
lambda x: embedding_ops.embedding_lookup_v2(root.v.variables, x))
# TODO(b/144057383): Remove the necessity of root.serve once saving context
# is made to tf.function cache.
root.serve = def_function.function(
lambda x: embedding_ops.embedding_lookup_v2(root.v.variables[0], x),
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32, name='x')])
# Trace and use root.train
self.assertAllEqual([3., 2.], root.train([0, 1]).numpy())
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save.save(root, save_dir, root.serve)
self.assertAllEqual([3., 2.],
_load_and_run(save_dir, {'x': [0, 1]})['output_0'])
# Continue using root.train for training
self.assertAllEqual([3., 2.], root.train([0, 1]).numpy())
def test_load_raises_error(self):
root = tracking.AutoTrackable()
v1 = variables_lib.Variable([3.])
v2 = variables_lib.Variable([2.])
root.v = sharded_variable.ShardedVariable([v1, v2])
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save.save(root, save_dir)
with self.assertRaisesRegex(
ValueError, 'Loading a saved_model containing ShardedVariable'):
load.load(save_dir)
def test_validation_errors(self):
with self.assertRaisesRegex(ValueError, 'Expected a list of '):
sharded_variable.ShardedVariable(
[variables_lib.Variable([0]), 'not-a-variable'])
with self.assertRaisesRegex(ValueError, 'must have the same dtype'):
sharded_variable.ShardedVariable([
variables_lib.Variable([0], dtype='int64'),
variables_lib.Variable([1], dtype='int32')
])
with self.assertRaisesRegex(ValueError, 'the same shapes except'):
sharded_variable.ShardedVariable([
variables_lib.Variable(array_ops.ones((5, 10))),
variables_lib.Variable(array_ops.ones((5, 20)))
])
with self.assertRaisesRegex(ValueError, '`SaveSliceInfo` should not'):
v = variables_lib.Variable([0])
v._set_save_slice_info(
variables_lib.Variable.SaveSliceInfo(
full_name='s', full_shape=[2], var_offset=[0], var_shape=[1]))
sharded_variable.ShardedVariable([v])
def test_as_function_input(self):
variables1 = [
variables_lib.Variable([1]),
variables_lib.Variable([1]),
]
s = sharded_variable.ShardedVariable(variables1)
variables2 = [
variables_lib.Variable([2]),
variables_lib.Variable([2]),
]
s2 = sharded_variable.ShardedVariable(variables2)
trace_count = [0]
@def_function.function
def func(sharded_var):
trace_count[0] = trace_count[0] + 1
sharded_var.assign([0, 0])
func(s)
self.assertAllEqual(ops.convert_to_tensor(s), [0, 0])
self.assertEqual(trace_count[0], 1)
func(s2)
self.assertAllEqual(ops.convert_to_tensor(s2), [0, 0])
self.assertEqual(trace_count[0], 1)
def test_flatten(self):
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
]
s = sharded_variable.ShardedVariable(variables)
got = nest.flatten(s)
self.assertIs(s, got[0])
got = nest.flatten(s, expand_composites=True)
self.assertAllEqual(variables, got)
def test_tf_module(self):
class Model(module.Module):
def __init__(self):
super().__init__()
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
]
self.w = sharded_variable.ShardedVariable(variables)
model = Model()
self.assertLen(model.variables, 2)
self.assertEqual(model.variables[0], [0])
self.assertEqual(model.variables[1], [1])
self.assertAllEqual(model.variables, model.trainable_variables)
self.assertLen(model._checkpoint_dependencies, 1)
self.assertIs(model._checkpoint_dependencies[0].ref, model.w)
def test_embedding_lookup(self):
v = [
variables_lib.Variable([[1., 2.], [3., 4.]]),
variables_lib.Variable([[5., 6.], [7., 8.]]),
variables_lib.Variable([[9., 10.]])
]
sv = sharded_variable.ShardedVariable(v)
@def_function.function
def lookup():
ids = constant_op.constant([0, 3, 4])
return embedding_ops.embedding_lookup_v2(sv, ids)
@def_function.function
def sparse_lookup():
sp_ids = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [2, 2]],
values=[0, 3, 4, 1],
dense_shape=[3, 3])
return embedding_ops.embedding_lookup_sparse_v2(sv, sp_ids, None)
@def_function.function
def safe_sparse_lookup():
sp_ids = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [2, 2]],
values=[0, -1, 4, 1],
dense_shape=[3, 3])
sp_weights = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [2, 2]],
values=[1., 1., -1., 1.],
dense_shape=[3, 3])
return embedding_ops.safe_embedding_lookup_sparse_v2(
sv, sp_ids, sp_weights)
# TODO(chenkai): Add safe_sparse_lookup to the list. Currently
# ShardedVariable is converted to a tensor in safe_sparse_lookup.
for func in [lookup, sparse_lookup]:
num_gather_ops = 0
for op in func.get_concrete_function().graph.get_operations():
if op.type == 'ResourceGather':
num_gather_ops += 1
self.assertEqual(
num_gather_ops, len(v), 'Number of ResourceGather op does not match'
' expected, possibly due to ShardedVariable accidentally being'
' converted to tensor in embedding_lookup ops.')
self.assertAllEqual(lookup(), [[1., 2.], [7., 8.], [9., 10.]])
self.assertAllClose(sparse_lookup(), [[4., 5.], [9., 10.], [3., 4.]])
self.assertAllClose(safe_sparse_lookup(), [[1., 2.], [0., 0.], [3., 4.]])
def test_slicing(self):
v = [
variables_lib.Variable([[1, 2], [3, 4], [5, 6]]),
variables_lib.Variable([[7, 8], [9, 10], [11, 12]]),
variables_lib.Variable([[13, 14], [15, 16]])
]
sv = sharded_variable.ShardedVariable(v)
empty = v[0][0:0]
# Test cases: positive step
self.assertAllEqual(sv[:], array_ops.concat(v, axis=0))
self.assertAllEqual(sv[:2], [[1, 2], [3, 4]])
self.assertAllEqual(sv[-8:2], [[1, 2], [3, 4]])
self.assertAllEqual(sv[-10:2], [[1, 2], [3, 4]])
self.assertAllEqual(sv[5:], [[11, 12], [13, 14], [15, 16]])
self.assertAllEqual(sv[5:-1], [[11, 12], [13, 14]])
self.assertAllEqual(sv[::3], [[1, 2], [7, 8], [13, 14]])
self.assertAllEqual(sv[::5], [[1, 2], [11, 12]])
self.assertAllEqual(sv[1::6], [[3, 4], [15, 16]])
self.assertAllEqual(sv[1:5:6], [[3, 4]])
self.assertAllEqual(sv[1::7], [[3, 4]])
self.assertAllEqual(sv[2:7], [[5, 6], [7, 8], [9, 10], [11, 12], [13, 14]])
self.assertAllEqual(sv[2:7:2], [[5, 6], [9, 10], [13, 14]])
self.assertAllEqual(sv[2:7:3], [[5, 6], [11, 12]])
# Test cases: negative step
self.assertAllEqual(
sv[::-1], array_ops.reverse(array_ops.concat(v, axis=0), axis=[0]))
self.assertAllEqual(sv[2::-1], [[5, 6], [3, 4], [1, 2]])
self.assertAllEqual(sv[2:-8:-1], [[5, 6], [3, 4]])
self.assertAllEqual(sv[2:-10:-1], [[5, 6], [3, 4], [1, 2]])
self.assertAllEqual(sv[4::-1], [[9, 10], [7, 8], [5, 6], [3, 4], [1, 2]])
self.assertAllEqual(sv[-1:-3:-1], [[15, 16], [13, 14]])
self.assertAllEqual(sv[::-5], [[15, 16], [5, 6]])
self.assertAllEqual(sv[6::-6], [[13, 14], [1, 2]])
self.assertAllEqual(sv[6:5:-6], [[13, 14]])
self.assertAllEqual(sv[6::-7], [[13, 14]])
self.assertAllEqual(sv[7:1:-1],
[[15, 16], [13, 14], [11, 12], [9, 10], [7, 8], [5, 6]])
self.assertAllEqual(sv[7:1:-2], [[15, 16], [11, 12], [7, 8]])
self.assertAllEqual(sv[7:1:-4], [[15, 16], [7, 8]])
# Test cases: empty slice
self.assertAllEqual(sv[0:0], empty)
self.assertAllEqual(sv[5:3], empty)
self.assertAllEqual(sv[3:5:-1], empty)
self.assertAllEqual(sv[-1:0], empty)
self.assertAllEqual(sv[2:-1:-1], empty)
# Test cases: slicing other dimensions
self.assertAllEqual(sv[:, 0], [1, 3, 5, 7, 9, 11, 13, 15])
self.assertAllEqual(sv[:, 0:1], [[1], [3], [5], [7], [9], [11], [13], [15]])
# Test cases: normal indexing
self.assertAllEqual(sv[2], [5, 6])
self.assertAllEqual(sv[6], [13, 14])
self.assertAllEqual(sv[2, 1], 6)
self.assertAllEqual(sv[-2], [13, 14])
with self.assertRaisesRegex(IndexError, 'out of bounds'):
_ = sv[100]
with self.assertRaisesRegex(IndexError, 'out of bounds'):
_ = sv[-100]
# Test cases: Ellipsis
self.assertAllEqual(sv[...], array_ops.concat(v, axis=0))
self.assertAllEqual(sv[..., 0], [1, 3, 5, 7, 9, 11, 13, 15])
self.assertAllEqual(sv[0:1, ...], [[1, 2]])
# Test cases: newaxis
self.assertAllEqual(
sv[array_ops.newaxis, ...],
array_ops.expand_dims_v2(array_ops.concat(v, axis=0), axis=0))
# Test cases: boolean masks
self.assertAllEqual(sv[ops.convert_to_tensor(sv) > 10],
[11, 12, 13, 14, 15, 16])
# Test cases: tensor input
with self.assertRaisesRegex(TypeError, 'not allowed'):
_ = sv[constant_op.constant(1)::]
with self.assertRaisesRegex(TypeError, 'not allowed'):
_ = sv[:constant_op.constant(1):]
with self.assertRaisesRegex(TypeError, 'not allowed'):
_ = sv[constant_op.constant(1)]
# Test cases: inside tf.function
@def_function.function
def func():
a = sv[:, 0]
return a
self.assertAllEqual(func(), [1, 3, 5, 7, 9, 11, 13, 15])
def test_operator_overload(self):
v1 = [
variables_lib.Variable([1.]),
variables_lib.Variable([2.]),
]
sv1 = sharded_variable.ShardedVariable(v1)
v2 = [
variables_lib.Variable([1.]),
variables_lib.Variable([2.]),
]
sv2 = sharded_variable.ShardedVariable(v2)
equal = sv1 == sv2
self.assertAllEqual(equal, [True, True])
self.assertAllEqual(sv1 + sv2, [2.0, 4.0])
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
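# Editorial sketch, not part of the upstream test suite: the PYSEC-2020-159 patch
# shown above swaps the scatter test values [0..4] for [1..5] so that the
# parameterized `scatter_div` case never divides by zero. The helper below is a
# hypothetical, numpy-only illustration of why a zero in `values` would poison
# that comparison; it is defined for reference only and is never invoked.
def _illustrate_scatter_div_by_zero():
  import numpy as np
  target = np.zeros((5, 1), dtype=np.float32)  # mirrors the zero-initialized variable
  delta = np.array([[0.], [1.], [2.], [3.], [4.]], dtype=np.float32)
  with np.errstate(divide='ignore', invalid='ignore'):
    quotient = target / delta  # 0 / 0 in the first element yields nan
  return bool(np.isnan(quotient[0, 0]))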
|
PYSEC-2021-264
|
tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
|
@@ -16,10 +16,12 @@
import numpy as np
+from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
@@ -460,6 +462,18 @@ def testDeserializeManyFailsInvalidProto(self):
self._testDeserializeFailsInvalidProtoHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
+ def testDeserializeInvalidVariant(self):
+ mu = gen_resource_variable_ops.mutex_v2()
+ mu_lock = gen_resource_variable_ops.mutex_lock(mutex=mu)
+
+ @def_function.function
+ def f():
+ return sparse_ops.deserialize_sparse(
+ serialized_sparse=mu_lock, dtype=dtypes.int32)
+
+ with self.assertRaisesRegex(ValueError, r"Shape must be at least rank 1"):
+ f()
+
if __name__ == "__main__":
test.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SerializeSparse."""
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SerializeSparseTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _testSerializeDeserializeHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
indices, values, shape = self.evaluate(sp_deserialized)
self.assertAllEqual(indices, sp_input[0])
self.assertAllEqual(values, sp_input[1])
self.assertAllEqual(shape, sp_input[2])
def testSerializeDeserialize(self):
self._testSerializeDeserializeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
def testVariantSerializeDeserialize(self):
self._testSerializeDeserializeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
serialized = array_ops.stack([serialized, serialized])
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input[0])
self.assertAllEqual(combined_values[:6], sp_input[1])
self.assertAllEqual(combined_values[6:], sp_input[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeBatch(self):
self._testSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testSerializeDeserializeManyBatch(self):
self._testSerializeDeserializeBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeBatch(self):
self._testSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeBatchInconsistentShapeHelper(
self, serialize_fn, deserialize_fn, out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeBatchInconsistentShape(self):
self._testSerializeDeserializeBatchInconsistentShapeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeBatchInconsistentShape(self):
self._testSerializeDeserializeBatchInconsistentShapeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeDeserializeNestedBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_5x6(np.arange(6))
serialized = serialize_fn(sp_input, out_type=out_type)
serialized = array_ops.stack([serialized, serialized])
serialized = array_ops.stack([serialized, serialized])
sp_deserialized = deserialize_fn(serialized, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
# minibatch 0
self.assertAllEqual(combined_indices[:6, :2], [[0, 0]] * 6)
self.assertAllEqual(combined_indices[:6, 2:], sp_input[0])
self.assertAllEqual(combined_values[:6], sp_input[1])
# minibatch 1
self.assertAllEqual(combined_indices[6:12, :2], [[0, 1]] * 6)
self.assertAllEqual(combined_indices[6:12, 2:], sp_input[0])
self.assertAllEqual(combined_values[6:12], sp_input[1])
# minibatch 2
self.assertAllEqual(combined_indices[12:18, :2], [[1, 0]] * 6)
self.assertAllEqual(combined_indices[12:18, 2:], sp_input[0])
self.assertAllEqual(combined_values[12:18], sp_input[1])
# minibatch 3
self.assertAllEqual(combined_indices[18:, :2], [[1, 1]] * 6)
self.assertAllEqual(combined_indices[18:, 2:], sp_input[0])
self.assertAllEqual(combined_values[18:], sp_input[1])
self.assertAllEqual(combined_shape, [2, 2, 5, 6])
@test_util.run_deprecated_v1
def testSerializeDeserializeNestedBatch(self):
self._testSerializeDeserializeNestedBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeNestedBatch(self):
self._testSerializeDeserializeNestedBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testFeedSerializeDeserializeBatchHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized, {sp_input0: input0_val,
sp_input1: input1_val})
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
@test_util.run_deprecated_v1
def testFeedSerializeDeserializeBatch(self):
self._testFeedSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testFeedSerializeDeserializeManyBatch(self):
self._testFeedSerializeDeserializeBatchHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testFeedVariantSerializeDeserializeBatch(self):
self._testFeedSerializeDeserializeBatchHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testSerializeManyShapeHelper(self,
serialize_many_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
serialized = serialize_many_fn(sparse_tensor, out_type=out_type)
serialized_value = sess.run(
serialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(serialized_value.shape, (4, 3))
@test_util.run_deprecated_v1
def testSerializeManyShape(self):
self._testSerializeManyShapeHelper(sparse_ops.serialize_many_sparse)
def testVariantSerializeManyShape(self):
# NOTE: The following test is a no-op as it is currently not possible to
# convert the serialized variant value to a numpy value.
pass
def _testSerializeManyDeserializeBatchHelper(self,
serialize_many_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
serialized = serialize_many_fn(sparse_tensor, out_type=out_type)
deserialized = deserialize_fn(serialized, dtype=dtypes.string)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
self.assertAllEqual(deserialized_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testSerializeManyDeserializeBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testSerializeManyDeserializeManyBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantSerializeManyDeserializeBatch(self):
self._testSerializeManyDeserializeBatchHelper(
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeScalar(self):
with self.session(use_gpu=False) as sess:
indices_value = np.array([[]], dtype=np.int64)
values_value = np.array([37], dtype=np.int32)
shape_value = np.array([], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder()
serialized = sparse_ops.serialize_sparse(
sparse_tensor, out_type=dtypes.variant)
deserialized = sparse_ops.deserialize_sparse(
serialized, dtype=dtypes.int32)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
self.assertAllEqual(deserialized_value.dense_shape, shape_value)
@test_util.run_deprecated_v1
def testVariantSerializeDeserializeScalarBatch(self):
with self.session(use_gpu=False) as sess:
indices_value = np.array([[]], dtype=np.int64)
values_value = np.array([37], dtype=np.int32)
shape_value = np.array([], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder()
serialized = sparse_ops.serialize_sparse(
sparse_tensor, out_type=dtypes.variant)
stacked = array_ops.stack([serialized, serialized])
deserialized = sparse_ops.deserialize_sparse(stacked, dtype=dtypes.int32)
deserialized_value = sess.run(
deserialized,
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertAllEqual(deserialized_value.indices,
np.array([[0], [1]], dtype=np.int64))
self.assertAllEqual(deserialized_value.values,
np.array([37, 37], dtype=np.int32))
self.assertAllEqual(deserialized_value.dense_shape,
np.array([2], dtype=np.int64))
def _testDeserializeFailsWrongTypeHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int64)
with self.assertRaisesOpError(
r"Requested SparseTensor of type int64 but "
r"SparseTensor\[0\].values.dtype\(\) == int32"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
@test_util.run_deprecated_v1
def testDeserializeFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantDeserializeFailsWrongType(self):
self._testDeserializeFailsWrongTypeHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse,
dtypes.variant)
def _testDeserializeFailsInconsistentRankHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = serialize_fn(sp_input1, out_type=out_type)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Inconsistent shape across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 2 but rank of SparseTensor\[1\] is: 3"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
@test_util.run_deprecated_v1
def testDeserializeFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
@test_util.run_deprecated_v1
def testVariantDeserializeFailsInconsistentRank(self):
self._testDeserializeFailsInconsistentRankHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
def _testDeserializeFailsInvalidProtoHelper(self,
serialize_fn,
deserialize_fn,
out_type=dtypes.string):
with self.cached_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
serialized0 = serialize_fn(sp_input0, out_type=out_type)
serialized1 = ["a", "b", "c"]
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = deserialize_fn(serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(r"Could not parse serialized proto"):
sess.run(sp_deserialized, {sp_input0: input0_val})
@test_util.run_deprecated_v1
def testDeserializeFailsInvalidProto(self):
self._testDeserializeFailsInvalidProtoHelper(sparse_ops.serialize_sparse,
sparse_ops.deserialize_sparse)
@test_util.run_deprecated_v1
def testDeserializeManyFailsInvalidProto(self):
self._testDeserializeFailsInvalidProtoHelper(
sparse_ops.serialize_sparse, sparse_ops.deserialize_many_sparse)
if __name__ == "__main__":
test.main()
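# Editorial sketch, not part of the upstream test suite: the PYSEC-2021-264 patch
# above verifies that handing an arbitrary scalar variant (a mutex lock) to
# deserialize_sparse is rejected during shape inference instead of crashing the
# process. The hypothetical wrapper below shows the equivalent caller-side guard --
# requiring a serialized input tensor of at least rank 1 -- using only modules
# that are already imported in this file.
def _guarded_deserialize_sparse(serialized_sparse, dtype=dtypes.int32):
  rank = serialized_sparse.shape.rank  # expects a tf.Tensor-like input
  if rank is not None and rank < 1:
    raise ValueError('serialized_sparse must be at least rank 1, got a scalar')
  return sparse_ops.deserialize_sparse(serialized_sparse, dtype=dtype)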
|
PYSEC-2021-822
|
synapse/config/tls.py
|
@@ -17,7 +17,7 @@
import warnings
from datetime import datetime
from hashlib import sha256
-from typing import List, Optional
+from typing import List, Optional, Pattern
from unpaddedbase64 import encode_base64
@@ -124,7 +124,7 @@ def read_config(self, config: dict, config_dir_path: str, **kwargs):
fed_whitelist_entries = []
# Support globs (*) in whitelist values
- self.federation_certificate_verification_whitelist = [] # type: List[str]
+ self.federation_certificate_verification_whitelist = [] # type: List[Pattern]
for entry in fed_whitelist_entries:
try:
entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import warnings
from datetime import datetime
from hashlib import sha256
from typing import List, Optional
from unpaddedbase64 import encode_base64
from OpenSSL import SSL, crypto
from twisted.internet._sslverify import Certificate, trustRootFromCertificates
from synapse.config._base import Config, ConfigError
from synapse.util import glob_to_regex
logger = logging.getLogger(__name__)
ACME_SUPPORT_ENABLED_WARN = """\
This server uses Synapse's built-in ACME support. Note that ACME v1 has been
deprecated by Let's Encrypt, and that Synapse doesn't currently support ACME v2,
which means that this feature will not work with Synapse installs set up after
November 2019, and that it may stop working in June 2020 for installs set up
before that date.
For more info and alternative solutions, see
https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
--------------------------------------------------------------------------------"""
class TlsConfig(Config):
section = "tls"
def read_config(self, config: dict, config_dir_path: str, **kwargs):
acme_config = config.get("acme", None)
if acme_config is None:
acme_config = {}
self.acme_enabled = acme_config.get("enabled", False)
if self.acme_enabled:
logger.warning(ACME_SUPPORT_ENABLED_WARN)
# hyperlink complains on py2 if this is not a Unicode
self.acme_url = str(
acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory")
)
self.acme_port = acme_config.get("port", 80)
self.acme_bind_addresses = acme_config.get("bind_addresses", ["::", "0.0.0.0"])
self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
self.acme_domain = acme_config.get("domain", config.get("server_name"))
self.acme_account_key_file = self.abspath(
acme_config.get("account_key_file", config_dir_path + "/client.key")
)
self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
if self.root.server.has_tls_listener():
if not self.tls_certificate_file:
raise ConfigError(
"tls_certificate_path must be specified if TLS-enabled listeners are "
"configured."
)
if not self.tls_private_key_file:
raise ConfigError(
"tls_private_key_path must be specified if TLS-enabled listeners are "
"configured."
)
self._original_tls_fingerprints = config.get("tls_fingerprints", [])
if self._original_tls_fingerprints is None:
self._original_tls_fingerprints = []
self.tls_fingerprints = list(self._original_tls_fingerprints)
# Whether to verify certificates on outbound federation traffic
self.federation_verify_certificates = config.get(
"federation_verify_certificates", True
)
# Minimum TLS version to use for outbound federation traffic
self.federation_client_minimum_tls_version = str(
config.get("federation_client_minimum_tls_version", 1)
)
if self.federation_client_minimum_tls_version not in ["1", "1.1", "1.2", "1.3"]:
raise ConfigError(
"federation_client_minimum_tls_version must be one of: 1, 1.1, 1.2, 1.3"
)
# Prevent people shooting themselves in the foot here by setting it to
# the biggest number blindly
if self.federation_client_minimum_tls_version == "1.3":
if getattr(SSL, "OP_NO_TLSv1_3", None) is None:
raise ConfigError(
(
"federation_client_minimum_tls_version cannot be 1.3, "
"your OpenSSL does not support it"
)
)
# Whitelist of domains to not verify certificates for
fed_whitelist_entries = config.get(
"federation_certificate_verification_whitelist", []
)
if fed_whitelist_entries is None:
fed_whitelist_entries = []
# Support globs (*) in whitelist values
self.federation_certificate_verification_whitelist = [] # type: List[str]
for entry in fed_whitelist_entries:
try:
entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))
except UnicodeEncodeError:
raise ConfigError(
"IDNA domain names are not allowed in the "
"federation_certificate_verification_whitelist: %s" % (entry,)
)
# Convert globs to regex
self.federation_certificate_verification_whitelist.append(entry_regex)
# List of custom certificate authorities for federation traffic validation
custom_ca_list = config.get("federation_custom_ca_list", None)
# Read in and parse custom CA certificates
self.federation_ca_trust_root = None
if custom_ca_list is not None:
if len(custom_ca_list) == 0:
# A trustroot cannot be generated without any CA certificates.
# Raise an error if this option has been specified without any
# corresponding certificates.
raise ConfigError(
"federation_custom_ca_list specified without "
"any certificate files"
)
certs = []
for ca_file in custom_ca_list:
logger.debug("Reading custom CA certificate file: %s", ca_file)
content = self.read_file(ca_file, "federation_custom_ca_list")
# Parse the CA certificates
try:
cert_base = Certificate.loadPEM(content)
certs.append(cert_base)
except Exception as e:
raise ConfigError(
"Error parsing custom CA certificate file %s: %s" % (ca_file, e)
)
self.federation_ca_trust_root = trustRootFromCertificates(certs)
# This config option applies to non-federation HTTP clients
# (e.g. for talking to recaptcha, identity servers, and such)
# It should never be used in production, and is intended for
# use only when running tests.
self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
"use_insecure_ssl_client_just_for_testing_do_not_use"
)
self.tls_certificate = None # type: Optional[crypto.X509]
self.tls_private_key = None # type: Optional[crypto.PKey]
def is_disk_cert_valid(self, allow_self_signed=True):
"""
Is the certificate we have on disk valid, and if so, for how long?
Args:
allow_self_signed (bool): Should we allow the certificate we
read to be self signed?
Returns:
int: Days remaining of certificate validity.
None: No certificate exists.
"""
if not os.path.exists(self.tls_certificate_file):
return None
try:
with open(self.tls_certificate_file, "rb") as f:
cert_pem = f.read()
except Exception as e:
raise ConfigError(
"Failed to read existing certificate file %s: %s"
% (self.tls_certificate_file, e)
)
try:
tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
except Exception as e:
raise ConfigError(
"Failed to parse existing certificate file %s: %s"
% (self.tls_certificate_file, e)
)
if not allow_self_signed:
if tls_certificate.get_subject() == tls_certificate.get_issuer():
raise ValueError(
"TLS Certificate is self signed, and this is not permitted"
)
# YYYYMMDDhhmmssZ -- in UTC
expires_on = datetime.strptime(
tls_certificate.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ"
)
now = datetime.utcnow()
days_remaining = (expires_on - now).days
return days_remaining
def read_certificate_from_disk(self, require_cert_and_key: bool):
"""
Read the certificates and private key from disk.
Args:
require_cert_and_key: set to True to throw an error if the certificate
and key file are not given
"""
if require_cert_and_key:
self.tls_private_key = self.read_tls_private_key()
self.tls_certificate = self.read_tls_certificate()
elif self.tls_certificate_file:
# we only need the certificate for the tls_fingerprints. Reload it if we
# can, but it's not a fatal error if we can't.
try:
self.tls_certificate = self.read_tls_certificate()
except Exception as e:
logger.info(
"Unable to read TLS certificate (%s). Ignoring as no "
"tls listeners enabled.",
e,
)
self.tls_fingerprints = list(self._original_tls_fingerprints)
if self.tls_certificate:
# Check that our own certificate is included in the list of fingerprints
# and include it if it is not.
x509_certificate_bytes = crypto.dump_certificate(
crypto.FILETYPE_ASN1, self.tls_certificate
)
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints}
if sha256_fingerprint not in sha256_fingerprints:
self.tls_fingerprints.append({"sha256": sha256_fingerprint})
def generate_config_section(
self,
config_dir_path,
server_name,
data_dir_path,
tls_certificate_path,
tls_private_key_path,
acme_domain,
**kwargs,
):
"""If the acme_domain is specified acme will be enabled.
If the TLS paths are not specified the default will be certs in the
config directory"""
base_key_name = os.path.join(config_dir_path, server_name)
if bool(tls_certificate_path) != bool(tls_private_key_path):
raise ConfigError(
"Please specify both a cert path and a key path or neither."
)
tls_enabled = (
"" if tls_certificate_path and tls_private_key_path or acme_domain else "#"
)
if not tls_certificate_path:
tls_certificate_path = base_key_name + ".tls.crt"
if not tls_private_key_path:
tls_private_key_path = base_key_name + ".tls.key"
acme_enabled = bool(acme_domain)
acme_domain = "matrix.example.com"
default_acme_account_file = os.path.join(data_dir_path, "acme_account.key")
# this is to avoid the max line length. Sorrynotsorry
proxypassline = (
"ProxyPass /.well-known/acme-challenge "
"http://localhost:8009/.well-known/acme-challenge"
)
# flake8 doesn't recognise that variables are used in the below string
_ = tls_enabled, proxypassline, acme_enabled, default_acme_account_file
return (
"""\
## TLS ##
# PEM-encoded X509 certificate for TLS.
# This certificate, as of Synapse 1.0, will need to be a valid and verifiable
# certificate, signed by a recognised Certificate Authority.
#
# See 'ACME support' below to enable auto-provisioning this certificate via
# Let's Encrypt.
#
# If supplying your own, be sure to use a `.pem` file that includes the
# full certificate chain including any intermediate certificates (for
# instance, if using certbot, use `fullchain.pem` as your certificate,
# not `cert.pem`).
#
%(tls_enabled)stls_certificate_path: "%(tls_certificate_path)s"
# PEM-encoded private key for TLS
#
%(tls_enabled)stls_private_key_path: "%(tls_private_key_path)s"
# Whether to verify TLS server certificates for outbound federation requests.
#
# Defaults to `true`. To disable certificate verification, uncomment the
# following line.
#
#federation_verify_certificates: false
# The minimum TLS version that will be used for outbound federation requests.
#
# Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note
# that setting this value higher than `1.2` will prevent federation to most
# of the public Matrix network: only configure it to `1.3` if you have an
# entirely private federation setup and you can ensure TLS 1.3 support.
#
#federation_client_minimum_tls_version: 1.2
# Skip federation certificate verification on the following whitelist
# of domains.
#
# This setting should only be used in very specific cases, such as
# federation over Tor hidden services and similar. For private networks
# of homeservers, you likely want to use a private CA instead.
#
        # Only effective if federation_verify_certificates is `true`.
#
#federation_certificate_verification_whitelist:
# - lon.example.com
# - *.domain.com
# - *.onion
# List of custom certificate authorities for federation traffic.
#
# This setting should only normally be used within a private network of
# homeservers.
#
# Note that this list will replace those that are provided by your
# operating environment. Certificates must be in PEM format.
#
#federation_custom_ca_list:
# - myCA1.pem
# - myCA2.pem
# - myCA3.pem
# ACME support: This will configure Synapse to request a valid TLS certificate
# for your configured `server_name` via Let's Encrypt.
#
# Note that ACME v1 is now deprecated, and Synapse currently doesn't support
# ACME v2. This means that this feature currently won't work with installs set
# up after November 2019. For more info, and alternative solutions, see
# https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1
#
# Note that provisioning a certificate in this way requires port 80 to be
# routed to Synapse so that it can complete the http-01 ACME challenge.
# By default, if you enable ACME support, Synapse will attempt to listen on
# port 80 for incoming http-01 challenges - however, this will likely fail
# with 'Permission denied' or a similar error.
#
# There are a couple of potential solutions to this:
#
# * If you already have an Apache, Nginx, or similar listening on port 80,
# you can configure Synapse to use an alternate port, and have your web
# server forward the requests. For example, assuming you set 'port: 8009'
# below, on Apache, you would write:
#
# %(proxypassline)s
#
# * Alternatively, you can use something like `authbind` to give Synapse
# permission to listen on port 80.
#
acme:
# ACME support is disabled by default. Set this to `true` and uncomment
# tls_certificate_path and tls_private_key_path above to enable it.
#
enabled: %(acme_enabled)s
# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
# https://acme-staging.api.letsencrypt.org/directory
#
#url: https://acme-v01.api.letsencrypt.org/directory
# Port number to listen on for the HTTP-01 challenge. Change this if
# you are forwarding connections through Apache/Nginx/etc.
#
port: 80
# Local addresses to listen on for incoming connections.
# Again, you may want to change this if you are forwarding connections
# through Apache/Nginx/etc.
#
bind_addresses: ['::', '0.0.0.0']
# How many days remaining on a certificate before it is renewed.
#
reprovision_threshold: 30
# The domain that the certificate should be for. Normally this
# should be the same as your Matrix domain (i.e., 'server_name'), but,
# by putting a file at 'https://<server_name>/.well-known/matrix/server',
# you can delegate incoming traffic to another server. If you do that,
# you should give the target of the delegation here.
#
# For example: if your 'server_name' is 'example.com', but
# 'https://example.com/.well-known/matrix/server' delegates to
# 'matrix.example.com', you should put 'matrix.example.com' here.
#
# If not set, defaults to your 'server_name'.
#
domain: %(acme_domain)s
# File to use for the account key. This will be generated if it doesn't
# exist.
#
# If unspecified, we will use CONFDIR/client.key.
#
account_key_file: %(default_acme_account_file)s
# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
# make HTTPS requests to this server will check that the TLS
# certificates returned by this server match one of the fingerprints.
#
# Synapse automatically adds the fingerprint of its own certificate
# to the list. So if federation traffic is handled directly by synapse
# then no modification to the list is required.
#
# If synapse is run behind a load balancer that handles the TLS then it
# will be necessary to add the fingerprints of the certificates used by
# the load balancers to this list if they are different from the one
# synapse is using.
#
# Homeservers are permitted to cache the list of TLS fingerprints
# returned in the key responses up to the "valid_until_ts" returned in
# the key response. It may be necessary to publish the fingerprints of a new
# certificate and wait until the "valid_until_ts" of the previous key
# responses have passed before deploying it.
#
# You can calculate a fingerprint from a given TLS listener via:
# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
# or by checking matrix.org/federationtester/api/report?server_name=$host
#
#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
"""
# Lowercase the string representation of boolean values
% {
x[0]: str(x[1]).lower() if isinstance(x[1], bool) else x[1]
for x in locals().items()
}
)
def read_tls_certificate(self) -> crypto.X509:
"""Reads the TLS certificate from the configured file, and returns it
Also checks if it is self-signed, and warns if so
Returns:
The certificate
"""
cert_path = self.tls_certificate_file
logger.info("Loading TLS certificate from %s", cert_path)
cert_pem = self.read_file(cert_path, "tls_certificate_path")
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
# Check if it is self-signed, and issue a warning if so.
if cert.get_issuer() == cert.get_subject():
warnings.warn(
(
"Self-signed TLS certificates will not be accepted by Synapse 1.0. "
"Please either provide a valid certificate, or use Synapse's ACME "
"support to provision one."
)
)
return cert
def read_tls_private_key(self) -> crypto.PKey:
"""Reads the TLS private key from the configured file, and returns it
Returns:
The private key
"""
private_key_path = self.tls_private_key_file
logger.info("Loading TLS key from %s", private_key_path)
private_key_pem = self.read_file(private_key_path, "tls_private_key_path")
return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
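The tls_fingerprints comment above describes computing a fingerprint with an openssl pipeline. A rough standard-library equivalent (a sketch, not part of Synapse; the host and port below are placeholders):

import base64
import hashlib
import ssl

def tls_fingerprint(host: str, port: int) -> str:
    # Fetch the server certificate, convert PEM to DER, then hash and
    # base64-encode without '=' padding, mirroring the openssl command above.
    pem = ssl.get_server_certificate((host, port))
    der = ssl.PEM_cert_to_DER_cert(pem)
    return base64.b64encode(hashlib.sha256(der).digest()).decode("ascii").rstrip("=")

# e.g. {"sha256": tls_fingerprint("matrix.example.com", 8448)}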
|
PYSEC-2021-135
|
synapse/push/push_rule_evaluator.py
|
@@ -19,6 +19,7 @@
from synapse.events import EventBase
from synapse.types import UserID
+from synapse.util import glob_to_regex, re_word_boundary
from synapse.util.caches.lrucache import LruCache
logger = logging.getLogger(__name__)
@@ -183,7 +184,7 @@ def _contains_display_name(self, display_name: str) -> bool:
r = regex_cache.get((display_name, False, True), None)
if not r:
r1 = re.escape(display_name)
- r1 = _re_word_boundary(r1)
+ r1 = re_word_boundary(r1)
r = re.compile(r1, flags=re.IGNORECASE)
regex_cache[(display_name, False, True)] = r
@@ -212,64 +213,14 @@ def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:
try:
r = regex_cache.get((glob, True, word_boundary), None)
if not r:
- r = _glob_to_re(glob, word_boundary)
+ r = glob_to_regex(glob, word_boundary)
regex_cache[(glob, True, word_boundary)] = r
return bool(r.search(value))
except re.error:
logger.warning("Failed to parse glob to regex: %r", glob)
return False
-def _glob_to_re(glob: str, word_boundary: bool) -> Pattern:
- """Generates regex for a given glob.
-
- Args:
- glob
- word_boundary: Whether to match against word boundaries or entire string.
- """
- if IS_GLOB.search(glob):
- r = re.escape(glob)
-
- r = r.replace(r"\*", ".*?")
- r = r.replace(r"\?", ".")
-
- # handle [abc], [a-z] and [!a-z] style ranges.
- r = GLOB_REGEX.sub(
- lambda x: (
- "[%s%s]" % (x.group(1) and "^" or "", x.group(2).replace(r"\\\-", "-"))
- ),
- r,
- )
- if word_boundary:
- r = _re_word_boundary(r)
-
- return re.compile(r, flags=re.IGNORECASE)
- else:
- r = "^" + r + "$"
-
- return re.compile(r, flags=re.IGNORECASE)
- elif word_boundary:
- r = re.escape(glob)
- r = _re_word_boundary(r)
-
- return re.compile(r, flags=re.IGNORECASE)
- else:
- r = "^" + re.escape(glob) + "$"
- return re.compile(r, flags=re.IGNORECASE)
-
-
-def _re_word_boundary(r: str) -> str:
- """
- Adds word boundary characters to the start and end of an
- expression to require that the match occur as a whole word,
- but do so respecting the fact that strings starting or ending
- with non-word characters will change word boundaries.
- """
- # we can't use \b as it chokes on unicode. however \W seems to be okay
- # as shorthand for [^0-9A-Za-z_].
- return r"(^|\W)%s(\W|$)" % (r,)
-
-
def _flatten_dict(
d: Union[EventBase, dict],
prefix: Optional[List[str]] = None,
|
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import Any, Dict, List, Optional, Pattern, Tuple, Union
from synapse.events import EventBase
from synapse.types import UserID
from synapse.util.caches.lrucache import LruCache
logger = logging.getLogger(__name__)
GLOB_REGEX = re.compile(r"\\\[(\\\!|)(.*)\\\]")
IS_GLOB = re.compile(r"[\?\*\[\]]")
INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")
def _room_member_count(
ev: EventBase, condition: Dict[str, Any], room_member_count: int
) -> bool:
return _test_ineq_condition(condition, room_member_count)
def _sender_notification_permission(
ev: EventBase,
condition: Dict[str, Any],
sender_power_level: int,
power_levels: Dict[str, Union[int, Dict[str, int]]],
) -> bool:
notif_level_key = condition.get("key")
if notif_level_key is None:
return False
notif_levels = power_levels.get("notifications", {})
assert isinstance(notif_levels, dict)
room_notif_level = notif_levels.get(notif_level_key, 50)
return sender_power_level >= room_notif_level
def _test_ineq_condition(condition: Dict[str, Any], number: int) -> bool:
if "is" not in condition:
return False
m = INEQUALITY_EXPR.match(condition["is"])
if not m:
return False
ineq = m.group(1)
rhs = m.group(2)
if not rhs.isdigit():
return False
rhs_int = int(rhs)
if ineq == "" or ineq == "==":
return number == rhs_int
elif ineq == "<":
return number < rhs_int
elif ineq == ">":
return number > rhs_int
elif ineq == ">=":
return number >= rhs_int
elif ineq == "<=":
return number <= rhs_int
else:
return False
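# Illustrative only (not part of the original file): how _test_ineq_condition
# above reads the "is" field of a room_member_count condition.
#   _test_ineq_condition({"is": ">=10"}, 12)  -> True
#   _test_ineq_condition({"is": "5"}, 5)      -> True   (no operator means equality)
#   _test_ineq_condition({"is": "<2"}, 3)     -> False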
def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
"""
Converts a list of actions into a `tweaks` dict (which can then be passed to
the push gateway).
This function ignores all actions other than `set_tweak` actions, and treats
absent `value`s as `True`, which agrees with the only spec-defined treatment
of absent `value`s (namely, for `highlight` tweaks).
Args:
actions: list of actions
e.g. [
{"set_tweak": "a", "value": "AAA"},
{"set_tweak": "b", "value": "BBB"},
{"set_tweak": "highlight"},
"notify"
]
Returns:
dictionary of tweaks for those actions
e.g. {"a": "AAA", "b": "BBB", "highlight": True}
"""
tweaks = {}
for a in actions:
if not isinstance(a, dict):
continue
if "set_tweak" in a:
# value is allowed to be absent in which case the value assumed
# should be True.
tweaks[a["set_tweak"]] = a.get("value", True)
return tweaks
class PushRuleEvaluatorForEvent:
def __init__(
self,
event: EventBase,
room_member_count: int,
sender_power_level: int,
power_levels: Dict[str, Union[int, Dict[str, int]]],
):
self._event = event
self._room_member_count = room_member_count
self._sender_power_level = sender_power_level
self._power_levels = power_levels
# Maps strings of e.g. 'content.body' -> event["content"]["body"]
self._value_cache = _flatten_dict(event)
def matches(
self, condition: Dict[str, Any], user_id: str, display_name: str
) -> bool:
if condition["kind"] == "event_match":
return self._event_match(condition, user_id)
elif condition["kind"] == "contains_display_name":
return self._contains_display_name(display_name)
elif condition["kind"] == "room_member_count":
return _room_member_count(self._event, condition, self._room_member_count)
elif condition["kind"] == "sender_notification_permission":
return _sender_notification_permission(
self._event, condition, self._sender_power_level, self._power_levels
)
else:
return True
def _event_match(self, condition: dict, user_id: str) -> bool:
pattern = condition.get("pattern", None)
if not pattern:
pattern_type = condition.get("pattern_type", None)
if pattern_type == "user_id":
pattern = user_id
elif pattern_type == "user_localpart":
pattern = UserID.from_string(user_id).localpart
if not pattern:
logger.warning("event_match condition with no pattern")
return False
# XXX: optimisation: cache our pattern regexps
if condition["key"] == "content.body":
body = self._event.content.get("body", None)
if not body or not isinstance(body, str):
return False
return _glob_matches(pattern, body, word_boundary=True)
else:
haystack = self._get_value(condition["key"])
if haystack is None:
return False
return _glob_matches(pattern, haystack)
def _contains_display_name(self, display_name: str) -> bool:
if not display_name:
return False
body = self._event.content.get("body", None)
if not body or not isinstance(body, str):
return False
# Similar to _glob_matches, but do not treat display_name as a glob.
r = regex_cache.get((display_name, False, True), None)
if not r:
r1 = re.escape(display_name)
r1 = _re_word_boundary(r1)
r = re.compile(r1, flags=re.IGNORECASE)
regex_cache[(display_name, False, True)] = r
return bool(r.search(body))
def _get_value(self, dotted_key: str) -> Optional[str]:
return self._value_cache.get(dotted_key, None)
# Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
regex_cache = LruCache(
50000, "regex_push_cache"
) # type: LruCache[Tuple[str, bool, bool], Pattern]
def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:
"""Tests if value matches glob.
Args:
glob
value: String to test against glob.
word_boundary: Whether to match against word boundaries or entire
string. Defaults to False.
"""
try:
r = regex_cache.get((glob, True, word_boundary), None)
if not r:
r = _glob_to_re(glob, word_boundary)
regex_cache[(glob, True, word_boundary)] = r
return bool(r.search(value))
except re.error:
logger.warning("Failed to parse glob to regex: %r", glob)
return False
def _glob_to_re(glob: str, word_boundary: bool) -> Pattern:
"""Generates regex for a given glob.
Args:
glob
word_boundary: Whether to match against word boundaries or entire string.
"""
if IS_GLOB.search(glob):
r = re.escape(glob)
r = r.replace(r"\*", ".*?")
r = r.replace(r"\?", ".")
# handle [abc], [a-z] and [!a-z] style ranges.
r = GLOB_REGEX.sub(
lambda x: (
"[%s%s]" % (x.group(1) and "^" or "", x.group(2).replace(r"\\\-", "-"))
),
r,
)
if word_boundary:
r = _re_word_boundary(r)
return re.compile(r, flags=re.IGNORECASE)
else:
r = "^" + r + "$"
return re.compile(r, flags=re.IGNORECASE)
elif word_boundary:
r = re.escape(glob)
r = _re_word_boundary(r)
return re.compile(r, flags=re.IGNORECASE)
else:
r = "^" + re.escape(glob) + "$"
return re.compile(r, flags=re.IGNORECASE)
def _re_word_boundary(r: str) -> str:
"""
Adds word boundary characters to the start and end of an
expression to require that the match occur as a whole word,
but do so respecting the fact that strings starting or ending
with non-word characters will change word boundaries.
"""
# we can't use \b as it chokes on unicode. however \W seems to be okay
# as shorthand for [^0-9A-Za-z_].
return r"(^|\W)%s(\W|$)" % (r,)
def _flatten_dict(
d: Union[EventBase, dict],
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
if prefix is None:
prefix = []
if result is None:
result = {}
for key, value in d.items():
if isinstance(value, str):
result[".".join(prefix + [key])] = value.lower()
elif hasattr(value, "items"):
_flatten_dict(value, prefix=(prefix + [key]), result=result)
return result
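The word-boundary trick used by _re_word_boundary above can be exercised in isolation; a minimal sketch (not part of the original file):

import re

def word_boundary_pattern(literal: str) -> re.Pattern:
    # \W is used instead of \b because, per the comment above, \b chokes on unicode.
    return re.compile(r"(^|\W)%s(\W|$)" % re.escape(literal), re.IGNORECASE)

p = word_boundary_pattern("foo bar")
assert p.search("say Foo Bar now")    # whole-word, case-insensitive match
assert not p.search("xfoo bar yy")    # no match when embedded in a larger word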
|
PYSEC-2021-135
|
synapse/util/__init__.py
|
@@ -15,6 +15,7 @@
import json
import logging
import re
+from typing import Pattern
import attr
from frozendict import frozendict
@@ -26,6 +27,9 @@
logger = logging.getLogger(__name__)
+_WILDCARD_RUN = re.compile(r"([\?\*]+)")
+
+
def _reject_invalid_json(val):
"""Do not allow Infinity, -Infinity, or NaN values in JSON."""
raise ValueError("Invalid JSON value: '%s'" % val)
@@ -158,25 +162,54 @@ def log_failure(failure, msg, consumeErrors=True):
return failure
-def glob_to_regex(glob):
+def glob_to_regex(glob: str, word_boundary: bool = False) -> Pattern:
"""Converts a glob to a compiled regex object.
- The regex is anchored at the beginning and end of the string.
-
Args:
- glob (str)
+ glob: pattern to match
+ word_boundary: If True, the pattern will be allowed to match at word boundaries
+ anywhere in the string. Otherwise, the pattern is anchored at the start and
+ end of the string.
Returns:
- re.RegexObject
+ compiled regex pattern
"""
- res = ""
- for c in glob:
- if c == "*":
- res = res + ".*"
- elif c == "?":
- res = res + "."
+
+ # Patterns with wildcards must be simplified to avoid performance cliffs
+ # - The glob `?**?**?` is equivalent to the glob `???*`
+ # - The glob `???*` is equivalent to the regex `.{3,}`
+ chunks = []
+ for chunk in _WILDCARD_RUN.split(glob):
+ # No wildcards? re.escape()
+ if not _WILDCARD_RUN.match(chunk):
+ chunks.append(re.escape(chunk))
+ continue
+
+ # Wildcards? Simplify.
+ qmarks = chunk.count("?")
+ if "*" in chunk:
+ chunks.append(".{%d,}" % qmarks)
else:
- res = res + re.escape(c)
+ chunks.append(".{%d}" % qmarks)
+
+ res = "".join(chunks)
- # \A anchors at start of string, \Z at end of string
- return re.compile(r"\A" + res + r"\Z", re.IGNORECASE)
+ if word_boundary:
+ res = re_word_boundary(res)
+ else:
+ # \A anchors at start of string, \Z at end of string
+ res = r"\A" + res + r"\Z"
+
+ return re.compile(res, re.IGNORECASE)
+
+
+def re_word_boundary(r: str) -> str:
+ """
+ Adds word boundary characters to the start and end of an
+ expression to require that the match occur as a whole word,
+ but do so respecting the fact that strings starting or ending
+ with non-word characters will change word boundaries.
+ """
+ # we can't use \b as it chokes on unicode. however \W seems to be okay
+ # as shorthand for [^0-9A-Za-z_].
+ return r"(^|\W)%s(\W|$)" % (r,)
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import attr
from frozendict import frozendict
from twisted.internet import defer, task
from synapse.logging import context
logger = logging.getLogger(__name__)
def _reject_invalid_json(val):
"""Do not allow Infinity, -Infinity, or NaN values in JSON."""
raise ValueError("Invalid JSON value: '%s'" % val)
def _handle_frozendict(obj):
"""Helper for json_encoder. Makes frozendicts serializable by returning
the underlying dict
"""
if type(obj) is frozendict:
# fishing the protected dict out of the object is a bit nasty,
# but we don't really want the overhead of copying the dict.
return obj._dict
raise TypeError(
"Object of type %s is not JSON serializable" % obj.__class__.__name__
)
# A custom JSON encoder which:
# * handles frozendicts
# * produces valid JSON (no NaNs etc)
# * reduces redundant whitespace
json_encoder = json.JSONEncoder(
allow_nan=False, separators=(",", ":"), default=_handle_frozendict
)
# Create a custom decoder to reject Python extensions to JSON.
json_decoder = json.JSONDecoder(parse_constant=_reject_invalid_json)
def unwrapFirstError(failure):
# defer.gatherResults and DeferredLists wrap failures.
failure.trap(defer.FirstError)
return failure.value.subFailure
@attr.s(slots=True)
class Clock:
"""
A Clock wraps a Twisted reactor and provides utilities on top of it.
Args:
reactor: The Twisted reactor to use.
"""
_reactor = attr.ib()
@defer.inlineCallbacks
def sleep(self, seconds):
d = defer.Deferred()
with context.PreserveLoggingContext():
self._reactor.callLater(seconds, d.callback, seconds)
res = yield d
return res
def time(self):
"""Returns the current system time in seconds since epoch."""
return self._reactor.seconds()
def time_msec(self):
"""Returns the current system time in milliseconds since epoch."""
return int(self.time() * 1000)
def looping_call(self, f, msec, *args, **kwargs):
"""Call a function repeatedly.
Waits `msec` initially before calling `f` for the first time.
Note that the function will be called with no logcontext, so if it is anything
other than trivial, you probably want to wrap it in run_as_background_process.
Args:
f(function): The function to call repeatedly.
msec(float): How long to wait between calls in milliseconds.
*args: Positional arguments to pass to function.
**kwargs: Keyword arguments to pass to function.
"""
call = task.LoopingCall(f, *args, **kwargs)
call.clock = self._reactor
d = call.start(msec / 1000.0, now=False)
d.addErrback(log_failure, "Looping call died", consumeErrors=False)
return call
def call_later(self, delay, callback, *args, **kwargs):
"""Call something later
Note that the function will be called with no logcontext, so if it is anything
other than trivial, you probably want to wrap it in run_as_background_process.
Args:
delay(float): How long to wait in seconds.
callback(function): Function to call
*args: Positional arguments to pass to function.
**kwargs: Keyword arguments to pass to function.
"""
def wrapped_callback(*args, **kwargs):
with context.PreserveLoggingContext():
callback(*args, **kwargs)
with context.PreserveLoggingContext():
return self._reactor.callLater(delay, wrapped_callback, *args, **kwargs)
def cancel_call_later(self, timer, ignore_errs=False):
try:
timer.cancel()
except Exception:
if not ignore_errs:
raise
def log_failure(failure, msg, consumeErrors=True):
"""Creates a function suitable for passing to `Deferred.addErrback` that
logs any failures that occur.
Args:
msg (str): Message to log
consumeErrors (bool): If true consumes the failure, otherwise passes
on down the callback chain
Returns:
func(Failure)
"""
logger.error(
msg, exc_info=(failure.type, failure.value, failure.getTracebackObject())
)
if not consumeErrors:
return failure
def glob_to_regex(glob):
"""Converts a glob to a compiled regex object.
The regex is anchored at the beginning and end of the string.
Args:
glob (str)
Returns:
re.RegexObject
"""
res = ""
for c in glob:
if c == "*":
res = res + ".*"
elif c == "?":
res = res + "."
else:
res = res + re.escape(c)
# \A anchors at start of string, \Z at end of string
return re.compile(r"\A" + res + r"\Z", re.IGNORECASE)
|
PYSEC-2021-135
|
tests/federation/test_federation_server.py
|
@@ -74,6 +74,25 @@ def test_block_ip_literals(self):
self.assertFalse(server_matches_acl_event("[1:2::]", e))
self.assertTrue(server_matches_acl_event("1:2:3:4", e))
+ def test_wildcard_matching(self):
+ e = _create_acl_event({"allow": ["good*.com"]})
+ self.assertTrue(
+ server_matches_acl_event("good.com", e),
+ "* matches 0 characters",
+ )
+ self.assertTrue(
+ server_matches_acl_event("GOOD.COM", e),
+ "pattern is case-insensitive",
+ )
+ self.assertTrue(
+ server_matches_acl_event("good.aa.com", e),
+ "* matches several characters, including '.'",
+ )
+ self.assertFalse(
+ server_matches_acl_event("ishgood.com", e),
+ "pattern does not allow prefixes",
+ )
+
class StateQueryTests(unittest.FederatingHomeserverTestCase):
|
# Copyright 2018 New Vector Ltd
# Copyright 2019 Matrix.org Federation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from parameterized import parameterized
from synapse.events import make_event_from_dict
from synapse.federation.federation_server import server_matches_acl_event
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from tests import unittest
class FederationServerTests(unittest.FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
room.register_servlets,
login.register_servlets,
]
@parameterized.expand([(b"",), (b"foo",), (b'{"limit": Infinity}',)])
def test_bad_request(self, query_content):
"""
Querying with bad data returns a reasonable error code.
"""
u1 = self.register_user("u1", "pass")
u1_token = self.login("u1", "pass")
room_1 = self.helper.create_room_as(u1, tok=u1_token)
self.inject_room_member(room_1, "@user:other.example.com", "join")
"/get_missing_events/(?P<room_id>[^/]*)/?"
channel = self.make_request(
"POST",
"/_matrix/federation/v1/get_missing_events/%s" % (room_1,),
query_content,
)
self.assertEquals(400, channel.code, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON")
class ServerACLsTestCase(unittest.TestCase):
def test_blacklisted_server(self):
e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]})
logging.info("ACL event: %s", e.content)
self.assertFalse(server_matches_acl_event("evil.com", e))
self.assertFalse(server_matches_acl_event("EVIL.COM", e))
self.assertTrue(server_matches_acl_event("evil.com.au", e))
self.assertTrue(server_matches_acl_event("honestly.not.evil.com", e))
def test_block_ip_literals(self):
e = _create_acl_event({"allow_ip_literals": False, "allow": ["*"]})
logging.info("ACL event: %s", e.content)
self.assertFalse(server_matches_acl_event("1.2.3.4", e))
self.assertTrue(server_matches_acl_event("1a.2.3.4", e))
self.assertFalse(server_matches_acl_event("[1:2::]", e))
self.assertTrue(server_matches_acl_event("1:2:3:4", e))
class StateQueryTests(unittest.FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
room.register_servlets,
login.register_servlets,
]
def test_without_event_id(self):
"""
Querying v1/state/<room_id> without an event ID will return the current
known state.
"""
u1 = self.register_user("u1", "pass")
u1_token = self.login("u1", "pass")
room_1 = self.helper.create_room_as(u1, tok=u1_token)
self.inject_room_member(room_1, "@user:other.example.com", "join")
channel = self.make_request(
"GET", "/_matrix/federation/v1/state/%s" % (room_1,)
)
self.assertEquals(200, channel.code, channel.result)
self.assertEqual(
channel.json_body["room_version"],
self.hs.config.default_room_version.identifier,
)
members = set(
map(
lambda x: x["state_key"],
filter(
lambda x: x["type"] == "m.room.member", channel.json_body["pdus"]
),
)
)
self.assertEqual(members, {"@user:other.example.com", u1})
self.assertEqual(len(channel.json_body["pdus"]), 6)
def test_needs_to_be_in_room(self):
"""
Querying v1/state/<room_id> requires the server
be in the room to provide data.
"""
u1 = self.register_user("u1", "pass")
u1_token = self.login("u1", "pass")
room_1 = self.helper.create_room_as(u1, tok=u1_token)
channel = self.make_request(
"GET", "/_matrix/federation/v1/state/%s" % (room_1,)
)
self.assertEquals(403, channel.code, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
def _create_acl_event(content):
return make_event_from_dict(
{
"room_id": "!a:b",
"event_id": "$a:b",
"type": "m.room.server_acls",
"sender": "@a:b",
"content": content,
}
)
|
PYSEC-2021-135
|
tests/push/test_push_rule_evaluator.py
|
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Any, Dict
+
from synapse.api.room_versions import RoomVersions
from synapse.events import FrozenEvent
from synapse.push import push_rule_evaluator
@@ -66,6 +68,170 @@ def test_display_name(self):
# A display name with spaces should work fine.
self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar"))
+ def _assert_matches(
+ self, condition: Dict[str, Any], content: Dict[str, Any], msg=None
+ ) -> None:
+ evaluator = self._get_evaluator(content)
+ self.assertTrue(evaluator.matches(condition, "@user:test", "display_name"), msg)
+
+ def _assert_not_matches(
+ self, condition: Dict[str, Any], content: Dict[str, Any], msg=None
+ ) -> None:
+ evaluator = self._get_evaluator(content)
+ self.assertFalse(
+ evaluator.matches(condition, "@user:test", "display_name"), msg
+ )
+
+ def test_event_match_body(self):
+ """Check that event_match conditions on content.body work as expected"""
+
+ # if the key is `content.body`, the pattern matches substrings.
+
+ # non-wildcards should match
+ condition = {
+ "kind": "event_match",
+ "key": "content.body",
+ "pattern": "foobaz",
+ }
+ self._assert_matches(
+ condition,
+ {"body": "aaa FoobaZ zzz"},
+ "patterns should match and be case-insensitive",
+ )
+ self._assert_not_matches(
+ condition,
+ {"body": "aa xFoobaZ yy"},
+ "pattern should only match at word boundaries",
+ )
+ self._assert_not_matches(
+ condition,
+ {"body": "aa foobazx yy"},
+ "pattern should only match at word boundaries",
+ )
+
+ # wildcards should match
+ condition = {
+ "kind": "event_match",
+ "key": "content.body",
+ "pattern": "f?o*baz",
+ }
+
+ self._assert_matches(
+ condition,
+ {"body": "aaa FoobarbaZ zzz"},
+ "* should match string and pattern should be case-insensitive",
+ )
+ self._assert_matches(
+ condition, {"body": "aa foobaz yy"}, "* should match 0 characters"
+ )
+ self._assert_not_matches(
+ condition, {"body": "aa fobbaz yy"}, "? should not match 0 characters"
+ )
+ self._assert_not_matches(
+ condition, {"body": "aa fiiobaz yy"}, "? should not match 2 characters"
+ )
+ self._assert_not_matches(
+ condition,
+ {"body": "aa xfooxbaz yy"},
+ "pattern should only match at word boundaries",
+ )
+ self._assert_not_matches(
+ condition,
+ {"body": "aa fooxbazx yy"},
+ "pattern should only match at word boundaries",
+ )
+
+ # test backslashes
+ condition = {
+ "kind": "event_match",
+ "key": "content.body",
+ "pattern": r"f\oobaz",
+ }
+ self._assert_matches(
+ condition,
+ {"body": r"F\oobaz"},
+ "backslash should match itself",
+ )
+ condition = {
+ "kind": "event_match",
+ "key": "content.body",
+ "pattern": r"f\?obaz",
+ }
+ self._assert_matches(
+ condition,
+ {"body": r"F\oobaz"},
+ r"? after \ should match any character",
+ )
+
+ def test_event_match_non_body(self):
+ """Check that event_match conditions on other keys work as expected"""
+
+ # if the key is anything other than 'content.body', the pattern must match the
+ # whole value.
+
+ # non-wildcards should match
+ condition = {
+ "kind": "event_match",
+ "key": "content.value",
+ "pattern": "foobaz",
+ }
+ self._assert_matches(
+ condition,
+ {"value": "FoobaZ"},
+ "patterns should match and be case-insensitive",
+ )
+ self._assert_not_matches(
+ condition,
+ {"value": "xFoobaZ"},
+ "pattern should only match at the start/end of the value",
+ )
+ self._assert_not_matches(
+ condition,
+ {"value": "FoobaZz"},
+ "pattern should only match at the start/end of the value",
+ )
+
+ # wildcards should match
+ condition = {
+ "kind": "event_match",
+ "key": "content.value",
+ "pattern": "f?o*baz",
+ }
+ self._assert_matches(
+ condition,
+ {"value": "FoobarbaZ"},
+ "* should match string and pattern should be case-insensitive",
+ )
+ self._assert_matches(
+ condition, {"value": "foobaz"}, "* should match 0 characters"
+ )
+ self._assert_not_matches(
+ condition, {"value": "fobbaz"}, "? should not match 0 characters"
+ )
+ self._assert_not_matches(
+ condition, {"value": "fiiobaz"}, "? should not match 2 characters"
+ )
+ self._assert_not_matches(
+ condition,
+ {"value": "xfooxbaz"},
+ "pattern should only match at the start/end of the value",
+ )
+ self._assert_not_matches(
+ condition,
+ {"value": "fooxbazx"},
+ "pattern should only match at the start/end of the value",
+ )
+ self._assert_not_matches(
+ condition,
+ {"value": "x\nfooxbaz"},
+ "pattern should not match after a newline",
+ )
+ self._assert_not_matches(
+ condition,
+ {"value": "fooxbaz\nx"},
+ "pattern should not match before a newline",
+ )
+
def test_no_body(self):
"""Not having a body shouldn't break the evaluator."""
evaluator = self._get_evaluator({})
|
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.room_versions import RoomVersions
from synapse.events import FrozenEvent
from synapse.push import push_rule_evaluator
from synapse.push.push_rule_evaluator import PushRuleEvaluatorForEvent
from tests import unittest
class PushRuleEvaluatorTestCase(unittest.TestCase):
def _get_evaluator(self, content):
event = FrozenEvent(
{
"event_id": "$event_id",
"type": "m.room.history_visibility",
"sender": "@user:test",
"state_key": "",
"room_id": "#room:test",
"content": content,
},
RoomVersions.V1,
)
room_member_count = 0
sender_power_level = 0
power_levels = {}
return PushRuleEvaluatorForEvent(
event, room_member_count, sender_power_level, power_levels
)
def test_display_name(self):
"""Check for a matching display name in the body of the event."""
evaluator = self._get_evaluator({"body": "foo bar baz"})
condition = {
"kind": "contains_display_name",
}
# Blank names are skipped.
self.assertFalse(evaluator.matches(condition, "@user:test", ""))
# Check a display name that doesn't match.
self.assertFalse(evaluator.matches(condition, "@user:test", "not found"))
# Check a display name which matches.
self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
# A display name that matches, but not a full word does not result in a match.
self.assertFalse(evaluator.matches(condition, "@user:test", "ba"))
# A display name should not be interpreted as a regular expression.
self.assertFalse(evaluator.matches(condition, "@user:test", "ba[rz]"))
# A display name with spaces should work fine.
self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar"))
def test_no_body(self):
"""Not having a body shouldn't break the evaluator."""
evaluator = self._get_evaluator({})
condition = {
"kind": "contains_display_name",
}
self.assertFalse(evaluator.matches(condition, "@user:test", "foo"))
def test_invalid_body(self):
"""A non-string body should not break the evaluator."""
condition = {
"kind": "contains_display_name",
}
for body in (1, True, {"foo": "bar"}):
evaluator = self._get_evaluator({"body": body})
self.assertFalse(evaluator.matches(condition, "@user:test", "foo"))
def test_tweaks_for_actions(self):
"""
This tests the behaviour of tweaks_for_actions.
"""
actions = [
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight"},
"notify",
]
self.assertEqual(
push_rule_evaluator.tweaks_for_actions(actions),
{"sound": "default", "highlight": True},
)
|
PYSEC-2021-135
|
tests/tests.py
|
@@ -88,8 +88,8 @@ def test_list(self):
user_agent='Firefox')
response = self.client.get(reverse('user_sessions:session_list'))
self.assertContains(response, 'Active Sessions')
- self.assertContains(response, 'End Session', 3)
self.assertContains(response, 'Firefox')
+ self.assertNotContains(response, 'ABC123')
def test_delete(self):
session_key = self.client.cookies[settings.SESSION_COOKIE_NAME].value
|
import sys
from datetime import datetime, timedelta
from unittest import skipUnless
import django
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User
from django.contrib.sessions.backends.base import CreateError
from django.core.management import call_command
from django.test import TestCase, TransactionTestCase
from django.test.utils import modify_settings, override_settings
from django.urls import reverse
from django.utils.timezone import now
from user_sessions.backends.db import SessionStore
from user_sessions.models import Session
from user_sessions.templatetags.user_sessions import device, location
from user_sessions.utils.tests import Client
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from unittest.mock import patch
except ImportError:
from mock import patch
try:
from django.contrib.gis.geoip2 import GeoIP2
geoip = GeoIP2()
geoip_msg = None
except Exception as error_geoip2:
try:
from django.contrib.gis.geoip import GeoIP
geoip = GeoIP()
geoip_msg = None
except Exception as error_geoip:
geoip = None
geoip_msg = str(error_geoip2) + " and " + str(error_geoip)
class MiddlewareTest(TestCase):
def test_unmodified_session(self):
self.client.get('/', HTTP_USER_AGENT='Python/2.7')
self.assertNotIn(settings.SESSION_COOKIE_NAME, self.client.cookies)
def test_modify_session(self):
self.client.get('/modify_session/', HTTP_USER_AGENT='Python/2.7')
self.assertIn(settings.SESSION_COOKIE_NAME, self.client.cookies)
session = Session.objects.get(
pk=self.client.cookies[settings.SESSION_COOKIE_NAME].value
)
self.assertEqual(session.user_agent, 'Python/2.7')
self.assertEqual(session.ip, '127.0.0.1')
def test_login(self):
admin_login_url = reverse('admin:login')
user = User.objects.create_superuser('bouke', '', 'secret')
response = self.client.post(admin_login_url,
data={
'username': 'bouke',
'password': 'secret',
'this_is_the_login_form': '1',
'next': '/admin/'},
HTTP_USER_AGENT='Python/2.7')
self.assertRedirects(response, '/admin/')
session = Session.objects.get(
pk=self.client.cookies[settings.SESSION_COOKIE_NAME].value
)
self.assertEqual(user, session.user)
def test_long_ua(self):
self.client.get('/modify_session/',
HTTP_USER_AGENT=''.join('a' for _ in range(400)))
class ViewsTest(TestCase):
client_class = Client
def setUp(self):
self.user = User.objects.create_user('bouke', '', 'secret')
assert self.client.login(username='bouke', password='secret')
def test_list(self):
self.user.session_set.create(session_key='ABC123', ip='127.0.0.1',
expire_date=datetime.now() + timedelta(days=1),
user_agent='Firefox')
response = self.client.get(reverse('user_sessions:session_list'))
self.assertContains(response, 'Active Sessions')
self.assertContains(response, 'End Session', 3)
self.assertContains(response, 'Firefox')
def test_delete(self):
session_key = self.client.cookies[settings.SESSION_COOKIE_NAME].value
response = self.client.post(reverse('user_sessions:session_delete',
args=[session_key]))
self.assertRedirects(response, '/')
def test_delete_all_other(self):
self.user.session_set.create(ip='127.0.0.1', expire_date=datetime.now() + timedelta(days=1))
self.assertEqual(self.user.session_set.count(), 2)
response = self.client.post(reverse('user_sessions:session_delete_other'))
self.assertRedirects(response, reverse('user_sessions:session_list'))
self.assertEqual(self.user.session_set.count(), 1)
def test_delete_some_other(self):
other = self.user.session_set.create(session_key='OTHER', ip='127.0.0.1',
expire_date=datetime.now() + timedelta(days=1))
self.assertEqual(self.user.session_set.count(), 2)
response = self.client.post(reverse('user_sessions:session_delete',
args=[other.session_key]))
self.assertRedirects(response, reverse('user_sessions:session_list'))
self.assertEqual(self.user.session_set.count(), 1)
class AdminTest(TestCase):
client_class = Client
def setUp(self):
User.objects.create_superuser('bouke', '', 'secret')
assert self.client.login(username='bouke', password='secret')
expired = SessionStore(user_agent='Python/2.5', ip='20.13.1.1')
expired.set_expiry(-365 * 86400)
expired.save()
unexpired = SessionStore(user_agent='Python/2.7', ip='1.1.1.1')
unexpired.save()
self.admin_url = reverse('admin:user_sessions_session_changelist')
def test_list(self):
response = self.client.get(self.admin_url)
self.assertContains(response, 'Select session to change')
self.assertContains(response, '127.0.0.1')
self.assertContains(response, '20.13.1.1')
self.assertContains(response, '1.1.1.1')
def test_search(self):
response = self.client.get(self.admin_url, {'q': 'bouke'})
self.assertContains(response, '127.0.0.1')
self.assertNotContains(response, '20.13.1.1')
self.assertNotContains(response, '1.1.1.1')
def test_mine(self):
my_sessions = '%s?%s' % (self.admin_url, urlencode({'owner': 'my'}))
response = self.client.get(my_sessions)
self.assertContains(response, '127.0.0.1')
self.assertNotContains(response, '1.1.1.1')
def test_expired(self):
expired = '%s?%s' % (self.admin_url, urlencode({'active': '0'}))
response = self.client.get(expired)
self.assertContains(response, '20.13.1.1')
self.assertNotContains(response, '1.1.1.1')
def test_unexpired(self):
unexpired = '%s?%s' % (self.admin_url, urlencode({'active': '1'}))
response = self.client.get(unexpired)
self.assertContains(response, '1.1.1.1')
self.assertNotContains(response, '20.13.1.1')
class SessionStoreTest(TestCase):
def setUp(self):
self.store = SessionStore(user_agent='Python/2.7', ip='127.0.0.1')
User.objects.create_user('bouke', '', 'secret', id=1)
def test_untouched_init(self):
self.assertFalse(self.store.modified)
self.assertFalse(self.store.accessed)
def test_auth_session_key(self):
self.assertFalse(auth.SESSION_KEY in self.store)
self.assertFalse(self.store.modified)
self.assertTrue(self.store.accessed)
self.store.get(auth.SESSION_KEY)
self.assertFalse(self.store.modified)
self.store[auth.SESSION_KEY] = 1
self.assertTrue(self.store.modified)
def test_save(self):
self.store[auth.SESSION_KEY] = 1
self.store.save()
session = Session.objects.get(pk=self.store.session_key)
self.assertEqual(session.user_agent, 'Python/2.7')
self.assertEqual(session.ip, '127.0.0.1')
self.assertEqual(session.user_id, 1)
self.assertAlmostEqual(now(), session.last_activity,
delta=timedelta(seconds=5))
def test_load_unmodified(self):
self.store[auth.SESSION_KEY] = 1
self.store.save()
store2 = SessionStore(session_key=self.store.session_key,
user_agent='Python/2.7', ip='127.0.0.1')
store2.load()
self.assertEqual(store2.user_agent, 'Python/2.7')
self.assertEqual(store2.ip, '127.0.0.1')
self.assertEqual(store2.user_id, 1)
self.assertEqual(store2.modified, False)
def test_load_modified(self):
self.store[auth.SESSION_KEY] = 1
self.store.save()
store2 = SessionStore(session_key=self.store.session_key,
user_agent='Python/3.3', ip='8.8.8.8')
store2.load()
self.assertEqual(store2.user_agent, 'Python/3.3')
self.assertEqual(store2.ip, '8.8.8.8')
self.assertEqual(store2.user_id, 1)
self.assertEqual(store2.modified, True)
def test_duplicate_create(self):
s1 = SessionStore(session_key='DUPLICATE', user_agent='Python/2.7', ip='127.0.0.1')
s1.create()
s2 = SessionStore(session_key='DUPLICATE', user_agent='Python/2.7', ip='127.0.0.1')
s2.create()
self.assertNotEqual(s1.session_key, s2.session_key)
s3 = SessionStore(session_key=s1.session_key, user_agent='Python/2.7', ip='127.0.0.1')
with self.assertRaises(CreateError):
s3.save(must_create=True)
def test_delete(self):
# not persisted, should just return
self.store.delete()
# create, then delete
self.store.create()
session_key = self.store.session_key
self.store.delete()
# non-existing sessions, should not raise
self.store.delete()
self.store.delete(session_key)
def test_clear(self):
"""
Clearing the session should clear all non-browser information
"""
self.store[auth.SESSION_KEY] = 1
self.store.clear()
self.store.save()
session = Session.objects.get(pk=self.store.session_key)
self.assertEqual(session.user_id, None)
class ModelTest(TestCase):
def test_get_decoded(self):
User.objects.create_user('bouke', '', 'secret', id=1)
store = SessionStore(user_agent='Python/2.7', ip='127.0.0.1')
store[auth.SESSION_KEY] = 1
store['foo'] = 'bar'
store.save()
session = Session.objects.get(pk=store.session_key)
self.assertEqual(session.get_decoded(),
{'foo': 'bar', auth.SESSION_KEY: 1})
def test_very_long_ua(self):
ua = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; ' \
'Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; ' \
'.NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; ' \
'InfoPath.3; ms-office; MSOffice 14)'
store = SessionStore(user_agent=ua, ip='127.0.0.1')
store.save()
session = Session.objects.get(pk=store.session_key)
self.assertEqual(session.user_agent, ua[:200])
class ClientTest(TestCase):
def test_invalid_login(self):
client = Client()
self.assertFalse(client.login())
def test_restore_session(self):
store = SessionStore(user_agent='Python/2.7', ip='127.0.0.1')
store['foo'] = 'bar'
store.save()
client = Client()
client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
User.objects.create_user('bouke', '', 'secret')
assert client.login(username='bouke', password='secret')
self.assertEqual(client.session['foo'], 'bar')
def test_login_logout(self):
client = Client()
User.objects.create_user('bouke', '', 'secret')
assert client.login(username='bouke', password='secret')
assert settings.SESSION_COOKIE_NAME in client.cookies
client.logout()
assert settings.SESSION_COOKIE_NAME not in client.cookies
# should not raise
client.logout()
@patch('django.contrib.auth.signals.user_logged_in.send')
def test_login_signal(self, mock_user_logged_in):
client = Client()
User.objects.create_user('bouke', '', 'secret')
assert client.login(username='bouke', password='secret')
assert mock_user_logged_in.called
request = mock_user_logged_in.call_args[1]['request']
assert getattr(request, 'user', None) is not None
@override_settings(INSTALLED_APPS=())
def test_no_session(self):
self.assertIsNone(Client().session)
class LocationTemplateFilterTest(TestCase):
@override_settings(GEOIP_PATH=None)
def test_no_location(self):
self.assertEqual(location('127.0.0.1'), None)
@skipUnless(geoip, geoip_msg)
def test_locations(self):
self.assertEqual('United States', location('8.8.8.8'))
self.assertEqual('San Diego, United States', location('44.55.66.77'))
class DeviceTemplateFilterTest(TestCase):
def test_ie(self):
self.assertEqual(
'Internet Explorer on Windows XP',
device('Mozilla/4.0 (Windows; MSIE 6.0; Windows NT 5.1; SV1; '
'.NET CLR 2.0.50727)')
)
self.assertEqual(
'Internet Explorer on Windows Vista',
device('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; '
'Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322;'
' InfoPath.2; .NET CLR 3.5.21022; .NET CLR 3.5.30729; '
'MS-RTC LM 8; OfficeLiveConnector.1.4; OfficeLivePatch.1.3;'
' .NET CLR 3.0.30729)')
)
self.assertEqual(
'Internet Explorer on Windows 7',
device('Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; '
'Trident/6.0)')
)
self.assertEqual(
'Internet Explorer on Windows 8',
device('Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; '
'Win64; x64; Trident/6.0)')
)
self.assertEqual(
'Internet Explorer on Windows 8.1',
device('Mozilla/5.0 (IE 11.0; Windows NT 6.3; Trident/7.0; '
'.NET4.0E; .NET4.0C; rv:11.0) like Gecko')
)
def test_apple(self):
self.assertEqual(
'Safari on iPad',
device('Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) '
'AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 '
'Mobile/8C148 Safari/6533.18.5')
)
self.assertEqual(
'Safari on iPhone',
device('Mozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X) '
'AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 '
'Mobile/11A465 Safari/9537.53')
)
self.assertEqual(
'Safari on OS X',
device('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) '
'AppleWebKit/536.26.17 (KHTML, like Gecko) Version/6.0.2 '
'Safari/536.26.17')
)
def test_android(self):
# androids identify themselves as Safari to get the good stuff
self.assertEqual(
'Safari on Android',
device('Mozilla/5.0 (Linux; U; Android 1.5; de-de; HTC Magic '
'Build/CRB17) AppleWebKit/528.5+ (KHTML, like Gecko) '
'Version/3.1.2 Mobile Safari/525.20.1')
)
def test_firefox(self):
self.assertEqual(
'Firefox on Windows 7',
device('Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:22.0) '
'Gecko/20130328 Firefox/22.0')
)
def test_chrome(self):
self.assertEqual(
'Chrome on Windows 8.1',
device('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 ('
'KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36')
)
def test_firefox_only(self):
self.assertEqual("Firefox", device("Not a legit OS Firefox/51.0"))
def test_chrome_only(self):
self.assertEqual("Chrome", device("Not a legit OS Chrome/54.0.32"))
def test_safari_only(self):
self.assertEqual("Safari", device("Not a legit OS Safari/5.2"))
def test_linux_only(self):
self.assertEqual("Linux", device("Linux not a real browser/10.3"))
def test_ipad_only(self):
self.assertEqual("iPad", device("iPad not a real browser/10.3"))
def test_iphone_only(self):
self.assertEqual("iPhone", device("iPhone not a real browser/10.3"))
def test_windowsxp_only(self):
self.assertEqual("Windows XP", device("NT 5.1 not a real browser/10.3"))
def test_windowsvista_only(self):
self.assertEqual("Windows Vista", device("NT 6.0 not a real browser/10.3"))
def test_windows7_only(self):
self.assertEqual("Windows 7", device("NT 6.1 not a real browser/10.3"))
def test_windows8_only(self):
self.assertEqual("Windows 8", device("NT 6.2 not a real browser/10.3"))
def test_windows81_only(self):
self.assertEqual("Windows 8.1", device("NT 6.3 not a real browser/10.3"))
def test_windows_only(self):
self.assertEqual("Windows", device("Windows not a real browser/10.3"))
class ClearsessionsCommandTest(TestCase):
def test_can_call(self):
Session.objects.create(expire_date=datetime.now() - timedelta(days=1),
ip='127.0.0.1')
call_command('clearsessions')
self.assertEqual(Session.objects.count(), 0)
class MigratesessionsCommandTest(TransactionTestCase):
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sessions'})
def test_migrate_from_login(self):
from django.contrib.sessions.models import Session as DjangoSession
from django.contrib.sessions.backends.db import SessionStore as DjangoSessionStore
try:
call_command('migrate', 'sessions')
call_command('clearsessions')
user = User.objects.create_user('bouke', '', 'secret')
session = DjangoSessionStore()
session['_auth_user_id'] = user.id
session.save()
self.assertEqual(Session.objects.count(), 0)
self.assertEqual(DjangoSession.objects.count(), 1)
call_command('migratesessions')
new_sessions = list(Session.objects.all())
self.assertEqual(len(new_sessions), 1)
self.assertEqual(new_sessions[0].user, user)
self.assertEqual(new_sessions[0].ip, '127.0.0.1')
finally:
call_command('migrate', 'sessions', 'zero')
|
PYSEC-2020-230
|
rdiffweb/controller/page_mfa.py
|
@@ -105,10 +105,10 @@ def send_code(self):
"Multi-factor authentication is enabled for your account, but your account does not have a valid email address to send the verification code to. Check your account settings with your administrator."
)
)
- else:
- code = cherrypy.tools.auth_mfa.generate_code()
- body = self.app.templates.compile_template(
- "email_mfa.html", **{"header_name": self.app.cfg.header_name, 'user': userobj, 'code': code}
- )
- cherrypy.engine.publish('queue_mail', to=userobj.email, subject=_("Your verification code"), message=body)
- flash(_("A new verification code has been sent to your email."))
+ return
+ code = cherrypy.tools.auth_mfa.generate_code()
+ body = self.app.templates.compile_template(
+ "email_verification_code.html", **{"header_name": self.app.cfg.header_name, 'user': userobj, 'code': code}
+ )
+ cherrypy.engine.publish('queue_mail', to=userobj.email, subject=_("Your verification code"), message=body)
+ flash(_("A new verification code has been sent to your email."))
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import cherrypy
from wtforms.fields import BooleanField, StringField, SubmitField
from rdiffweb.controller import Controller, flash
from rdiffweb.controller.form import CherryForm
from rdiffweb.tools.auth_form import LOGIN_PERSISTENT
from rdiffweb.tools.i18n import gettext_lazy as _
# Define the logger
logger = logging.getLogger(__name__)
class MfaForm(CherryForm):
code = StringField(
_('Verification code'),
description=_('Enter the code to verify your identity.'),
render_kw={
"class": "form-control-lg",
"placeholder": _('Enter verification code here'),
"autocomplete": "off",
"autocorrect": "off",
"autofocus": "autofocus",
},
)
persistent = BooleanField(
_('Remember me'),
default=lambda: cherrypy.session.get(LOGIN_PERSISTENT, False),
)
submit = SubmitField(
_('Sign in'),
render_kw={"class": "btn-primary btn-lg btn-block"},
)
resend_code = SubmitField(
_('Resend code to my email'),
render_kw={"class": "btn-link btn-sm btn-block"},
)
def validate_code(self, field):
# Code is required when submit.
if self.submit.data:
if not self.code.data:
raise ValueError(_('Invalid verification code.'))
# Validate verification code.
if not cherrypy.tools.auth_mfa.verify_code(code=self.code.data, persistent=self.persistent.data):
raise ValueError(_('Invalid verification code.'))
def validate(self, extra_validators=None):
if not (self.submit.data or self.resend_code.data):
raise ValueError(_('Invalid operation'))
return super().validate()
class MfaPage(Controller):
@cherrypy.expose()
@cherrypy.tools.ratelimit(methods=['POST'])
def index(self, **kwargs):
form = MfaForm()
# Validate MFA
if form.is_submitted():
if form.validate():
if form.submit.data:
cherrypy.tools.auth_mfa.redirect_to_original_url()
elif form.resend_code.data:
self.send_code()
if cherrypy.tools.auth_mfa.is_code_expired():
# Send verification code if previous code expired.
self.send_code()
params = {
'form': form,
}
# Add welcome message to params. Try to load translated message.
welcome_msg = self.app.cfg.welcome_msg
if welcome_msg:
params["welcome_msg"] = welcome_msg.get('')
if hasattr(cherrypy.response, 'i18n'):
locale = cherrypy.response.i18n.locale.language
params["welcome_msg"] = welcome_msg.get(locale, params["welcome_msg"])
return self._compile_template("mfa.html", **params)
def send_code(self):
# Send verification code by email
userobj = cherrypy.serving.request.currentuser
if not userobj.email:
flash(
_(
"Multi-factor authentication is enabled for your account, but your account does not have a valid email address to send the verification code to. Check your account settings with your administrator."
)
)
else:
code = cherrypy.tools.auth_mfa.generate_code()
body = self.app.templates.compile_template(
"email_mfa.html", **{"header_name": self.app.cfg.header_name, 'user': userobj, 'code': code}
)
cherrypy.engine.publish('queue_mail', to=userobj.email, subject=_("Your verification code"), message=body)
flash(_("A new verification code has been sent to your email."))
|
PYSEC-2022-42978
|
rdiffweb/controller/page_pref_mfa.py
|
@@ -126,7 +126,7 @@ def send_code(self):
return
code = cherrypy.tools.auth_mfa.generate_code()
body = self.app.templates.compile_template(
- "email_mfa.html", **{"header_name": self.app.cfg.header_name, 'user': userobj, 'code': code}
+ "email_verification_code.html", **{"header_name": self.app.cfg.header_name, 'user': userobj, 'code': code}
)
cherrypy.engine.publish('queue_mail', to=userobj.email, subject=_("Your verification code"), message=body)
flash(_("A new verification code has been sent to your email."))
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cherrypy
from wtforms.fields import SelectField, StringField, SubmitField
from wtforms.widgets import HiddenInput
from rdiffweb.controller import Controller, flash
from rdiffweb.controller.form import CherryForm
from rdiffweb.core.model import UserObject
from rdiffweb.tools.i18n import gettext_lazy as _
class AbstractMfaForm(CherryForm):
def __init__(self, obj, **kwargs):
assert obj
super().__init__(obj=obj, **kwargs)
# Keep only one of the enable or disable button
if obj.mfa:
self.enable_mfa.widget = HiddenInput()
self.enable_mfa.data = ''
else:
self.disable_mfa.widget = HiddenInput()
self.disable_mfa.data = ''
class MfaStatusForm(AbstractMfaForm):
mfa = SelectField(
_('Two-Factor Authentication (2FA) Status'),
coerce=int,
choices=[
(UserObject.DISABLED_MFA, _("Disabled")),
(UserObject.ENABLED_MFA, _("Enabled")),
],
render_kw={'readonly': True, 'disabled': True, 'data-beta': '1'},
)
enable_mfa = SubmitField(_('Enable Two-Factor Authentication'), render_kw={"class": "btn-success"})
disable_mfa = SubmitField(_('Disable Two-Factor Authentication'), render_kw={"class": "btn-warning"})
class MfaToggleForm(AbstractMfaForm):
code = StringField(
_('Verification code'),
render_kw={
"placeholder": _('Enter verification code here'),
"autocomplete": "off",
"autocorrect": "off",
"autofocus": "autofocus",
},
)
enable_mfa = SubmitField(_('Enable Two-Factor Authentication'), render_kw={"class": "btn-success"})
disable_mfa = SubmitField(_('Disable Two-Factor Authentication'), render_kw={"class": "btn-warning"})
resend_code = SubmitField(
_('Resend code to my email'),
render_kw={"class": "btn-link"},
)
@property
def app(self):
return cherrypy.request.app
def populate_obj(self, userobj):
# Enable or disable MFA only when a code is provided.
if self.enable_mfa.data:
userobj.mfa = UserObject.ENABLED_MFA
flash(_("Two-Factor authentication enabled successfully."), level='success')
elif self.disable_mfa.data:
userobj.mfa = UserObject.DISABLED_MFA
flash(_("Two-Factor authentication disabled successfully."), level='success')
def validate_code(self, field):
# Code is required for enable_mfa and disable_mfa
if self.enable_mfa.data or self.disable_mfa.data:
if not self.code.data:
raise ValueError(_("Enter the verification code to continue."))
# Validate code
if not cherrypy.tools.auth_mfa.verify_code(self.code.data, False):
raise ValueError(_("Invalid verification code."))
def validate(self, extra_validators=None):
if not (self.enable_mfa.data or self.disable_mfa.data or self.resend_code.data):
raise ValueError(_('Invalid operation'))
return super().validate()
class PagePrefMfa(Controller):
@cherrypy.expose
def default(self, action=None, **kwargs):
form = MfaToggleForm(obj=self.app.currentuser)
if form.is_submitted():
if form.validate():
if form.resend_code.data:
self.send_code()
elif form.enable_mfa.data or form.disable_mfa.data:
form.populate_obj(self.app.currentuser)
form = MfaStatusForm(obj=self.app.currentuser)
# Send verification code if previous code expired.
elif cherrypy.tools.auth_mfa.is_code_expired():
self.send_code()
else:
form = MfaStatusForm(obj=self.app.currentuser)
params = {
'form': form,
}
return self._compile_template("prefs_mfa.html", **params)
def send_code(self):
userobj = self.app.currentuser
if not userobj.email:
flash(_("To continue, you must set up an email address for your account."), level='warning')
return
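# Generate a new verification code and deliver it to the user's email address.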
code = cherrypy.tools.auth_mfa.generate_code()
body = self.app.templates.compile_template(
"email_mfa.html", **{"header_name": self.app.cfg.header_name, 'user': userobj, 'code': code}
)
cherrypy.engine.publish('queue_mail', to=userobj.email, subject=_("Your verification code"), message=body)
flash(_("A new verification code has been sent to your email."))
|
PYSEC-2022-42978
|
rdiffweb/controller/tests/test_page_prefs_mfa.py
|
@@ -15,7 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from unittest.mock import MagicMock
+from unittest.mock import ANY, MagicMock
import cherrypy
from parameterized import parameterized
@@ -34,47 +34,49 @@ def setUp(self):
userobj = UserObject.get_user(self.USERNAME)
userobj.email = '[email protected]'
userobj.add()
+ # Register a listener on email
+ self.listener = MagicMock()
+ cherrypy.engine.subscribe('queue_mail', self.listener.queue_email, priority=50)
+
+ def tearDown(self):
+ cherrypy.engine.unsubscribe('queue_mail', self.listener.queue_email)
+ return super().tearDown()
def _set_mfa(self, mfa):
# Define mfa for user
userobj = UserObject.get_user(self.USERNAME)
userobj.mfa = mfa
userobj.add()
+ # Reset mock.
+ self.listener.queue_email.reset_mock()
+ # Leave to disable mfa
if mfa == UserObject.DISABLED_MFA:
return
# Generate a code for login if required
- self.listener = MagicMock()
- cherrypy.engine.subscribe('queue_mail', self.listener.queue_email, priority=50)
- try:
- self.getPage("/mfa/")
- self.assertStatus(200)
- self.assertInBody("A new verification code has been sent to your email.")
- # Extract code from email between <strong> and </strong>
- self.listener.queue_email.assert_called_once()
- message = self.listener.queue_email.call_args[1]['message']
- code = message.split('<strong>', 1)[1].split('</strong>')[0]
- # Login to MFA
- self.getPage("/mfa/", method='POST', body={'code': code, 'submit': '1'})
- self.assertStatus(303)
- finally:
- cherrypy.engine.unsubscribe('queue_mail', self.listener.queue_email)
+ self.getPage("/mfa/")
+ self.assertStatus(200)
+ self.assertInBody("A new verification code has been sent to your email.")
+ # Extract code from email between <strong> and </strong>
+ self.listener.queue_email.assert_called_once()
+ message = self.listener.queue_email.call_args[1]['message']
+ code = message.split('<strong>', 1)[1].split('</strong>')[0]
+ # Login to MFA
+ self.getPage("/mfa/", method='POST', body={'code': code, 'submit': '1'})
+ self.assertStatus(303)
+ # Clear mock.
+ self.listener.queue_email.reset_mock()
def _get_code(self, action):
assert action in ['enable_mfa', 'disable_mfa', 'resend_code']
- # Register an email listeer to capture email send
- self.listener = MagicMock()
- cherrypy.engine.subscribe('queue_mail', self.listener.queue_email, priority=50)
# Query MFA page to generate a code
- try:
- self.getPage("/prefs/mfa", method='POST', body={action: '1'})
- self.assertStatus(200)
- self.assertInBody("A new verification code has been sent to your email.")
- # Extract code from email between <strong> and </strong>
- self.listener.queue_email.assert_called_once()
- message = self.listener.queue_email.call_args[1]['message']
- return message.split('<strong>', 1)[1].split('</strong>')[0]
- finally:
- cherrypy.engine.unsubscribe('queue_mail', self.listener.queue_email)
+ self.getPage("/prefs/mfa", method='POST', body={action: '1'})
+ self.assertStatus(200)
+ self.assertInBody("A new verification code has been sent to your email.")
+ # Extract code from email between <strong> and </strong>
+ self.listener.queue_email.assert_called_once()
+ message = self.listener.queue_email.call_args[1]['message']
+ self.listener.queue_email.reset_mock()
+ return message.split('<strong>', 1)[1].split('</strong>')[0]
def test_get(self):
# When getting the page
@@ -84,11 +86,11 @@ def test_get(self):
@parameterized.expand(
[
- ('enable_mfa', UserObject.DISABLED_MFA, UserObject.ENABLED_MFA),
- ('disable_mfa', UserObject.ENABLED_MFA, UserObject.DISABLED_MFA),
+ ('enable_mfa', UserObject.DISABLED_MFA, UserObject.ENABLED_MFA, "Two-Factor Authentication turned on"),
+ ('disable_mfa', UserObject.ENABLED_MFA, UserObject.DISABLED_MFA, "Two-Factor Authentication turned off"),
]
)
- def test_with_valid_code(self, action, initial_mfa, expected_mfa):
+ def test_with_valid_code(self, action, initial_mfa, expected_mfa, expected_subject):
# Define mfa for user
self._set_mfa(initial_mfa)
# Given a user with email requesting a code
@@ -99,8 +101,10 @@ def test_with_valid_code(self, action, initial_mfa, expected_mfa):
self.assertStatus(200)
userobj = UserObject.get_user(self.USERNAME)
self.assertEqual(userobj.mfa, expected_mfa)
- # Then no email get sent
+ # Then no verification code get sent
self.assertNotInBody("A new verification code has been sent to your email.")
+ # Then an email confirmation get send
+ self.listener.queue_email.assert_called_once_with(to=ANY, subject=expected_subject, message=ANY)
# Then next page request is still working.
self.getPage('/')
self.assertStatus(200)
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest.mock import MagicMock
import cherrypy
from parameterized import parameterized
import rdiffweb.test
from rdiffweb.core.model import UserObject
class PagePrefMfaTest(rdiffweb.test.WebCase):
login = True
def setUp(self):
super().setUp()
# Define email for all test
userobj = UserObject.get_user(self.USERNAME)
userobj.email = '[email protected]'
userobj.add()
def _set_mfa(self, mfa):
# Define mfa for user
userobj = UserObject.get_user(self.USERNAME)
userobj.mfa = mfa
userobj.add()
if mfa == UserObject.DISABLED_MFA:
return
# Generate a code for login if required
self.listener = MagicMock()
cherrypy.engine.subscribe('queue_mail', self.listener.queue_email, priority=50)
try:
self.getPage("/mfa/")
self.assertStatus(200)
self.assertInBody("A new verification code has been sent to your email.")
# Extract code from email between <strong> and </strong>
self.listener.queue_email.assert_called_once()
message = self.listener.queue_email.call_args[1]['message']
code = message.split('<strong>', 1)[1].split('</strong>')[0]
# Login to MFA
self.getPage("/mfa/", method='POST', body={'code': code, 'submit': '1'})
self.assertStatus(303)
finally:
cherrypy.engine.unsubscribe('queue_mail', self.listener.queue_email)
def _get_code(self, action):
assert action in ['enable_mfa', 'disable_mfa', 'resend_code']
# Register an email listener to capture sent emails
self.listener = MagicMock()
cherrypy.engine.subscribe('queue_mail', self.listener.queue_email, priority=50)
# Query MFA page to generate a code
try:
self.getPage("/prefs/mfa", method='POST', body={action: '1'})
self.assertStatus(200)
self.assertInBody("A new verification code has been sent to your email.")
# Extract code from email between <strong> and </strong>
self.listener.queue_email.assert_called_once()
message = self.listener.queue_email.call_args[1]['message']
return message.split('<strong>', 1)[1].split('</strong>')[0]
finally:
cherrypy.engine.unsubscribe('queue_mail', self.listener.queue_email)
def test_get(self):
# When getting the page
self.getPage("/prefs/mfa")
# Then the page is return without error
self.assertStatus(200)
@parameterized.expand(
[
('enable_mfa', UserObject.DISABLED_MFA, UserObject.ENABLED_MFA),
('disable_mfa', UserObject.ENABLED_MFA, UserObject.DISABLED_MFA),
]
)
def test_with_valid_code(self, action, initial_mfa, expected_mfa):
# Define mfa for user
self._set_mfa(initial_mfa)
# Given a user with email requesting a code
code = self._get_code(action=action)
# When sending a valid code
self.getPage("/prefs/mfa", method='POST', body={action: '1', 'code': code})
# Then mfa get enabled or disable accordingly
self.assertStatus(200)
userobj = UserObject.get_user(self.USERNAME)
self.assertEqual(userobj.mfa, expected_mfa)
# Then no email get sent
self.assertNotInBody("A new verification code has been sent to your email.")
# Then next page request is still working.
self.getPage('/')
self.assertStatus(200)
@parameterized.expand(
[
('enable_mfa', UserObject.DISABLED_MFA, UserObject.DISABLED_MFA),
('disable_mfa', UserObject.ENABLED_MFA, UserObject.ENABLED_MFA),
]
)
def test_with_invalid_code(self, action, initial_mfa, expected_mfa):
# Define mfa for user
self._set_mfa(initial_mfa)
# Given a user with email requesting a code
self._get_code(action=action)
# When sending an invalid code
self.getPage("/prefs/mfa", method='POST', body={action: '1', 'code': '1234567'})
# Then mfa get enabled or disable accordingly
self.assertStatus(200)
userobj = UserObject.get_user(self.USERNAME)
self.assertEqual(userobj.mfa, expected_mfa)
# Then next page request is still working.
self.getPage('/')
self.assertStatus(200)
@parameterized.expand(
[
('enable_mfa', UserObject.DISABLED_MFA),
('disable_mfa', UserObject.ENABLED_MFA),
]
)
def test_without_email(self, action, initial_mfa):
# Define mfa for user
self._set_mfa(initial_mfa)
# Given a user without email requesting a code
userobj = UserObject.get_user(self.USERNAME)
userobj.email = ''
userobj.add()
# When trying to enable or disable mfa
self.getPage("/prefs/mfa", method='POST', body={action: '1'})
# Then an error is return to the user
self.assertStatus(200)
self.assertInBody("To continue, you must set up an email address for your account.")
@parameterized.expand(
[
(UserObject.DISABLED_MFA,),
(UserObject.ENABLED_MFA,),
]
)
def test_resend_code(self, initial_mfa):
# Define mfa for user
self._set_mfa(initial_mfa)
# When requesting a new code.
self.getPage("/prefs/mfa", method='POST', body={'resend_code': '1'})
# Then a new code get sent.
self.assertInBody("A new verification code has been sent to your email.")
|
PYSEC-2022-42978
|
rdiffweb/core/config.py
|
@@ -159,7 +159,7 @@ def get_parser():
'--emailsendchangednotification',
help='True to send notification when sensitive information get change in user profile.',
action='store_true',
- default=False,
+ default=True,
)
parser.add_argument(
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
import re
import sys
from collections import OrderedDict
import cherrypy
import configargparse
import pkg_resources
from cherrypy import Application
# Define the logger
logger = logging.getLogger(__name__)
# Get rdiffweb version.
try:
VERSION = pkg_resources.get_distribution("rdiffweb").version
except pkg_resources.DistributionNotFound:
VERSION = "DEV"
def get_parser():
# Get global config argument parser
parser = configargparse.ArgumentParser(
prog='rdiffweb',
description='Web interface to browse and restore rdiff-backup repositories.',
default_config_files=['/etc/rdiffweb/rdw.conf', '/etc/rdiffweb/rdw.conf.d/*.conf'],
add_env_var_help=True,
auto_env_var_prefix='RDIFFWEB_',
config_file_parser_class=ConfigFileParser,
conflict_handler='resolve',
)
parser.add_argument(
'-f', '--config', is_config_file=True, metavar='FILE', help='location of Rdiffweb configuration file'
)
parser.add(
'--database-uri',
'--sqlitedb-file',
'--sqlitedbfile',
metavar='URI',
help="""Location of the database used for persistence. SQLite and PostgreSQL
databases are officially supported. To use a SQLite database you may
define the location using a file path or a URI.
e.g.: /srv/rdiffweb/file.db or sqlite:///srv/rdiffweb/file.db`.
To use PostgreSQL server you must provide
a URI similar to postgresql://user:[email protected]/dbname and you
must install required dependencies.
By default, Rdiffweb uses a SQLite embedded database located at
/etc/rdiffweb/rdw.db.""",
default='/etc/rdiffweb/rdw.db',
)
parser.add_argument(
'-d',
'--debug',
action='store_true',
help='enable rdiffweb debug mode - change the log level to DEBUG, print exception stack trace to the web interface and show SQL query in logs',
)
parser.add_argument(
'--admin-user',
'--adminuser',
metavar='USERNAME',
help='administrator username. The administrator user gets created on startup if the database is empty.',
default='admin',
)
parser.add_argument(
'--admin-password',
metavar='USERNAME',
help="""administrator encrypted password as SSHA. Read online
documentation to know more about how to encrypt your password
into SSHA or use http://projects.marsching.org/weave4j/util/genpassword.php
When defined, administrator password cannot be updated using the web interface.
When undefined, default administrator password is `admin123` and
it can be updated using the web interface.""",
)
parser.add_argument(
'--default-theme',
'--defaulttheme',
help='define the default theme. Either: default, blue or orange. Define the CSS file to be loaded in the web interface. You may manually edit a CSS file to customize it. The location is similar to `/usr/local/lib/python3.9/dist-packages/rdiffweb/static/`',
choices=['default', 'blue', 'orange'],
default='default',
)
parser.add_argument(
'--environment',
choices=['development', 'production'],
help='define the type of environment: development, production. This is used to limit the information shown to the user when an error occurs.',
default='production',
)
parser.add_argument(
'--email-encryption',
'--emailencryption',
choices=['none', 'ssl', 'starttls'],
help='type of encryption to be used when establishing communication with SMTP server. Default: none',
default='none',
)
parser.add_argument(
'--email-host',
'--emailhost',
metavar='HOST',
help='SMTP server used to send email in the form <host>:<port>. If the port is not provided, the standard port 25 or 465 is used. e.g.: smtp.gmail.com:587',
)
parser.add_argument(
'--email-sender',
'--emailsender',
metavar='EMAIL',
help='email address used for the `from:` field when sending email.',
)
parser.add_argument(
'--email-notification-time',
'--emailnotificationtime',
metavar='TIME',
help='time when the email notification should be sent for inactive backups. e.g.: 22:00 Default value: 23:00',
default='23:00',
)
parser.add_argument(
'--email-username',
'--emailusername',
metavar='USERNAME',
help='username used for authentication with the SMTP server.',
)
parser.add_argument(
'--email-password',
'--emailpassword',
metavar='PASSWORD',
help='password used for authentication with the SMTP server.',
)
parser.add_argument(
'--email-send-changed-notification',
'--emailsendchangednotification',
help='True to send a notification when sensitive information gets changed in the user profile.',
action='store_true',
default=False,
)
parser.add_argument(
'--favicon',
help='location of an icon to be used as a favicon displayed in web browser.',
default=pkg_resources.resource_filename('rdiffweb', 'static/favicon.ico'),
) # @UndefinedVariable
parser.add_argument(
'--footer-name', '--footername', help=argparse.SUPPRESS, default='rdiffweb'
) # @UndefinedVariable
parser.add_argument(
'--footer-url', '--footerurl', help=argparse.SUPPRESS, default='https://rdiffweb.org/'
) # @UndefinedVariable
parser.add_argument(
'--header-logo',
'--headerlogo',
help='location of an image (preferably a .png) to be used as a replacement for the rdiffweb logo.',
)
parser.add_argument(
'--header-name',
'--headername',
help='application name displayed in the title bar and header menu.',
default='Rdiffweb',
)
parser.add_argument(
'--ldap-add-missing-user',
'--addmissinguser',
action='store_true',
help='enable creation of users from LDAP when the credentials are valid.',
default=False,
)
parser.add_argument(
'--ldap-add-user-default-role',
help='default role used when creating users from LDAP. This parameter is only useful when `--ldap-add-missing-user` is enabled.',
default='user',
choices=['admin', 'maintainer', 'user'],
)
parser.add_argument(
'--ldap-add-user-default-userroot',
help='default user root directory used when creating users from LDAP. LDAP attributes may be used to define the default location. e.g.: `/backups/{uid[0]}/`. This parameter is only useful when `--ldap-add-missing-user` is enabled.',
default='',
)
parser.add_argument(
'--ldap-uri',
'--ldapuri',
help='URL to the LDAP server used to validate user credentials. e.g.: ldap://localhost:389',
)
parser.add_argument(
'--ldap-base-dn',
'--ldapbasedn',
metavar='DN',
help='DN of the branch of the directory where all searches should start from. e.g.: dc=my,dc=domain',
default="",
)
parser.add_argument(
'--ldap-scope',
'--ldapscope',
help='scope of the search. Can be either base, onelevel or subtree',
choices=['base', 'onelevel', 'subtree'],
default="subtree",
)
parser.add_argument('--ldap-tls', '--ldaptls', action='store_true', help='enable TLS')
parser.add_argument(
'--ldap-username-attribute',
'--ldapattribute',
metavar='ATTRIBUTE',
help="The attribute to search username. If no attributes are provided, the default is to use `uid`. It's a good idea to choose an attribute that will be unique across all entries in the subtree you will be using.",
default='uid',
)
parser.add_argument(
'--ldap-filter',
'--ldapfilter',
help="search filter to limit LDAP lookup. If not provided, defaults to (objectClass=*), which searches for all objects in the tree.",
default='(objectClass=*)',
)
parser.add_argument(
'--ldap-required-group',
'--ldaprequiredgroup',
metavar='GROUPNAME',
help="name of the group of which the user must be a member to access rdiffweb. Should be used with ldap-group-attribute and ldap-group-attribute-is-dn.",
)
parser.add_argument(
'--ldap-group-attribute',
'--ldapgroupattribute',
metavar='ATTRIBUTE',
help="name of the attribute defining the groups of which the user is a member. Should be used with ldap-required-group and ldap-group-attribute-is-dn.",
default='member',
)
parser.add_argument(
'--ldap-group-attribute-is-dn',
'--ldapgroupattributeisdn',
help="True if the content of the attribute `ldap-group-attribute` is a DN.",
action='store_true',
)
parser.add_argument(
'--ldap-bind-dn',
'--ldapbinddn',
metavar='DN',
help="optional DN used to bind to the server when searching for entries. If not provided, will use an anonymous bind.",
default="",
)
parser.add_argument(
'--ldap-bind-password',
'--ldapbindpassword',
metavar='PASSWORD',
help="password to use in conjunction with LdapBindDn. Note that the bind password is probably sensitive data, and should be properly protected. You should only use the LdapBindDn and LdapBindPassword if you absolutely need them to search the directory.",
default="",
)
parser.add_argument(
'--ldap-version',
'--ldapversion',
'--ldapprotocolversion',
help="version of LDAP in use either 2 or 3. Default to 3.",
default=3,
type=int,
choices=[2, 3],
)
parser.add_argument(
'--ldap-network-timeout',
'--ldapnetworktimeout',
metavar='SECONDS',
help="timeout in seconds value used for LDAP connection",
default=100,
type=int,
)
parser.add_argument(
'--ldap-timeout',
'--ldaptimeout',
metavar='SECONDS',
help="timeout in seconds value used for LDAP request",
default=300,
type=int,
)
parser.add_argument(
'--ldap-encoding',
'--ldapencoding',
metavar='ENCODING',
help="encoding used by your LDAP server.",
default="utf-8",
)
parser.add_argument(
'--log-access-file', '--logaccessfile', metavar='FILE', help='location of Rdiffweb log access file.'
)
parser.add_argument(
'--log-file',
'--logfile',
metavar='FILE',
help='location of Rdiffweb log file. Print log to the console if not defined in the config file.',
)
parser.add_argument(
'--log-level',
'--loglevel',
help='Define the log level.',
choices=['ERROR', 'WARN', 'INFO', 'DEBUG'],
default='INFO',
)
parser.add_argument(
'--max-depth',
'--maxdepth',
metavar='DEPTH',
help="define the maximum folder depthness to search into the user's root directory to find repositories. This is commonly used if you repositories are organised with multiple sub-folder.",
type=int,
default=3,
)
parser.add('--quota-set-cmd', '--quotasetcmd', metavar='COMMAND', help="command line to set the user's quota.")
parser.add('--quota-get-cmd', '--quotagetcmd', metavar='COMMAND', help="command line to get the user's quota.")
parser.add(
'--quota-used-cmd', '--quotausedcmd', metavar='COMMAND', help="Command line to get user's quota disk usage."
)
parser.add(
'--remove-older-time',
'--removeoldertime',
metavar='TIME',
help="Time when to execute the remove older scheduled job. e.g.: 22:30",
default='23:00',
)
parser.add('--server-host', '--serverhost', metavar='IP', default='127.0.0.1', help='IP address to listen to')
parser.add(
'--server-port',
'--serverport',
metavar='PORT',
help='port to listen to for HTTP request',
default='8080',
type=int,
)
parser.add(
'--rate-limit-dir',
'--session-dir',
'--sessiondir',
metavar='FOLDER',
help='location where to store rate-limit information. When undefined, the data is kept in memory. `--session-dir` is deprecated and kept for backward compatibility.',
)
parser.add(
'--rate-limit',
metavar='LIMIT',
type=int,
default=20,
help='maximum number of requests per hour that can be made on sensitive endpoints. When this limit is reached, an HTTP 429 message is returned to the user or the user is logged out. This security measure is used to limit brute force attacks on the login page and the RESTful API.',
)
parser.add(
'--session-idle-timeout',
metavar='MINUTES',
help='This timeout defines the amount of time a session will remain active in case there is no activity in the session. User sessions will be revoked after this period of inactivity, unless the user selected "remember me". Default 5 minutes.',
default=5,
)
parser.add(
'--session-absolute-timeout',
metavar='MINUTES',
help='This timeout defines the maximum amount of time a session can be active. After this period, user is forced to (re)authenticate, unless the user selected "remember me". Default 20 minutes.',
default=20,
)
parser.add(
'--session-persistent-timeout',
metavar='MINUTES',
help='This timeout defines the maximum amount of time to remember and trust a user device. This timeout is used when the user selects "remember me". Default 30 days.',
default=43200,
)
parser.add(
'--ssl-certificate',
'--sslcertificate',
metavar='CERT',
help='location of the SSL Certification to enable HTTPS (not recommended)',
)
parser.add(
'--ssl-private-key',
'--sslprivatekey',
metavar='KEY',
help='location of the SSL Private Key to enable HTTPS (not recommended)',
)
parser.add(
'--tempdir',
metavar='FOLDER',
help='alternate temporary folder to be used when restoring files. Might be useful if the default location has limited disk space. Default to TEMPDIR environment or `/tmp`.',
)
parser.add(
'--disable-ssh-keys',
action='store_true',
help='used to hide SSH key management and prevent users from adding or removing SSH keys using the web application',
default=False,
)
parser.add(
'--password-min-length',
type=int,
help="Minimum length of the user's password",
default=8,
)
parser.add(
'--password-max-length',
type=int,
help="Maximum length of the user's password",
default=128,
)
parser.add(
'--password-score',
type=lambda x: max(1, min(int(x), 4)),
help="Minimum zxcvbn's score for password. Value from 1 to 4. Default value 2. Read more about it here: https://github.com/dropbox/zxcvbn",
default=2,
)
parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
# Here we append a list of arguments for each locale.
flags = ['--welcome-msg'] + ['--welcome-msg-' + i for i in ['ca', 'en', 'es', 'fr', 'ru']] + ['--welcomemsg']
parser.add_argument(
*flags,
metavar='HTML',
help='replace the welcome message displayed in the login page for default locale or for a specific locale',
action=LocaleAction
)
return parser
def parse_args(args=None, config_file_contents=None):
args = sys.argv[1:] if args is None else args
return get_parser().parse_args(args, config_file_contents=config_file_contents)
class LocaleAction(argparse.Action):
"""
Custom Action to support defining arguments with locale.
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
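# nargs is accepted for signature compatibility but ignored; each option consumes a single value.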
super(LocaleAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, value, option_string=None):
if option_string[-3] == '-':
# When using arguments, we can extract the locale from the argument key
locale = option_string[-2:]
elif value[2] == ':':
# When using a config file, the locale can be extracted from the value e.g. fr:message
locale = value[0:2]
value = value[3:]
else:
locale = ''
# Create a dictionary with locale.
items = getattr(namespace, self.dest) or {}
items[locale] = value
setattr(namespace, self.dest, items)
class ConfigFileParser(object):
"""
Custom config file parser to support rdiffweb config file format.
"""
def get_syntax_description(self):
msg = "Configuration file syntax allows: key=value, flag=true."
return msg
def parse(self, stream):
"""
Used to read the rdiffweb config file as dict.
"""
result = OrderedDict()
for i, line in enumerate(stream):
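# Strip trailing comments (anything after '#') and surrounding whitespace.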
line = re.compile("(.*?)#.*").sub(r'\1', line).strip()
if not line:
continue
if '=' not in line:
raise configargparse.ConfigFileParserException(
"Unexpected line {} in {}: {}".format(i, getattr(stream, 'name', 'stream'), line)
)
split_line = line.partition('=')
if not len(split_line) == 3:
raise configargparse.ConfigFileParserException(
"Unexpected line {} in {}: {}".format(i, getattr(stream, 'name', 'stream'), line)
)
# Get key & value
key = split_line[0].lower().strip().replace('_', '-')
value = split_line[2].strip()
# Support welcome-msg locale for backward compatibility
m = re.match("welcome-?msg\\[(ca|en|es|fr|ru)\\]", key.lower())
if m:
key = "welcome-msg-" + m.group(1)
value = m.group(1) + ":" + value
result[key] = value
# This dictionary is read by cherrypy. So create appropriate structure.
return result
class Option(object):
def __init__(self, key):
assert key
self.key = key
def __get__(self, instance, owner):
"""
Return a property to wrap the given option.
"""
return self.get(instance)
def get(self, instance=None):
"""
Return the value of this options.
"""
if isinstance(instance, Application):
app = instance
else:
app = cherrypy.request.app or getattr(instance, 'app', None)
assert app, "Option() can't get reference to app"
assert app.cfg, "Option() can't get reference to app.cfg"
return getattr(app.cfg, self.key)
|
PYSEC-2022-42978
|
rdiffweb/core/model/_user.py
|
@@ -24,7 +24,7 @@
from sqlalchemy import Column, Integer, SmallInteger, String, and_, event, inspect, or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.hybrid import hybrid_property
-from sqlalchemy.orm import deferred, relationship
+from sqlalchemy.orm import deferred, relationship, validates
from zxcvbn import zxcvbn
import rdiffweb.tools.db # noqa
@@ -74,9 +74,9 @@ class UserObject(Base):
PATTERN_USERNAME = r"[a-zA-Z0-9_.\-]+$"
userid = Column('UserID', Integer, primary_key=True)
- _username = Column('Username', String, nullable=False, unique=True)
+ username = Column('Username', String, nullable=False, unique=True)
hash_password = Column('Password', String, nullable=False, default="")
- _user_root = Column('UserRoot', String, nullable=False, default="")
+ user_root = Column('UserRoot', String, nullable=False, default="")
_is_admin = deferred(
Column(
'IsAdmin',
@@ -86,7 +86,7 @@ class UserObject(Base):
doc="DEPRECATED This column is replaced by 'role'",
)
)
- _email = Column('UserEmail', String, nullable=False, default="")
+ email = Column('UserEmail', String, nullable=False, default="")
restore_format = deferred(
Column(
'RestoreFormat',
@@ -96,7 +96,7 @@ class UserObject(Base):
doc="DEPRECATED This column is not used anymore",
)
)
- _role = Column('role', SmallInteger, nullable=False, server_default=str(USER_ROLE))
+ role = Column('role', SmallInteger, nullable=False, server_default=str(USER_ROLE), default=USER_ROLE)
fullname = Column('fullname', String, nullable=False, default="")
mfa = Column('mfa', SmallInteger, nullable=False, default=DISABLED_MFA)
repo_objs = relationship(
@@ -129,7 +129,7 @@ def create_admin_user(cls, default_username, default_password):
userobj.add()
@classmethod
- def add_user(cls, username, password=None, **attrs):
+ def add_user(cls, username, password=None, role=USER_ROLE, **attrs):
"""
Used to add a new user with an optional password.
"""
@@ -143,6 +143,7 @@ def add_user(cls, username, password=None, **attrs):
userobj = UserObject(
username=username,
hash_password=hash_password(password) if password else '',
+ role=role,
**attrs,
).add()
# Raise event
@@ -383,51 +384,11 @@ def set_password(self, password):
def __eq__(self, other):
return type(self) == type(other) and inspect(self).key == inspect(other).key
- @hybrid_property
- def username(self):
- return self._username
-
- @username.setter
- def username(self, value):
- oldvalue = self._username
- self._username = value
- if oldvalue != value:
- cherrypy.engine.publish('user_attr_changed', self, {'username': (oldvalue, value)})
-
- @hybrid_property
- def role(self):
- if self._role is None:
- return self.USER_ROLE
- return self._role
-
- @role.setter
- def role(self, value):
- oldvalue = self._role
- self._role = value
- if oldvalue != value:
- cherrypy.engine.publish('user_attr_changed', self, {'role': (oldvalue, value)})
-
- @hybrid_property
- def email(self):
- return self._email
-
- @email.setter
- def email(self, value):
- oldvalue = self._email
- self._email = value
- if oldvalue != value:
- cherrypy.engine.publish('user_attr_changed', self, {'email': (oldvalue, value)})
-
- @hybrid_property
- def user_root(self):
- return self._user_root
-
- @user_root.setter
- def user_root(self, value):
- oldvalue = self._user_root
- self._user_root = value
- if oldvalue != value:
- cherrypy.engine.publish('user_attr_changed', self, {'user_root': (oldvalue, value)})
+ @validates('username')
+ def validates_username(self, key, value):
+ if self.username:
+ raise ValueError('Username cannot be modified.')
+ return value
def validate_access_token(self, token):
"""
@@ -460,3 +421,19 @@ def user_after_delete(mapper, connection, target):
Publish event when user is deleted.
"""
cherrypy.engine.publish('user_deleted', target.username)
+
+
[email protected]_for(UserObject, 'after_update')
+def user_attr_changed(mapper, connection, target):
+ changes = {}
+ state = inspect(target)
+ for attr in state.attrs:
+ if attr.key in ['user_root', 'email', 'role', 'mfa']:
+ hist = attr.load_history()
+ if hist.has_changes():
+ changes[attr.key] = (
+ hist.deleted[0] if len(hist.deleted) >= 1 else None,
+ hist.added[0] if len(hist.added) >= 1 else None,
+ )
+ if changes:
+ cherrypy.engine.publish('user_attr_changed', target, changes)
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
import os
import secrets
import string
import cherrypy
from sqlalchemy import Column, Integer, SmallInteger, String, and_, event, inspect, or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import deferred, relationship
from zxcvbn import zxcvbn
import rdiffweb.tools.db # noqa
from rdiffweb.core import authorizedkeys
from rdiffweb.core.passwd import check_password, hash_password
from rdiffweb.tools.i18n import ugettext as _
from ._repo import RepoObject
from ._sshkey import SshKey
from ._token import Token
logger = logging.getLogger(__name__)
Base = cherrypy.tools.db.get_base()
SEP = b'/'
class DuplicateSSHKeyError(Exception):
"""
Raised by add_authorizedkey when trying to add the same SSH Key twice.
"""
pass
class UserObject(Base):
__tablename__ = 'users'
__table_args__ = {'sqlite_autoincrement': True}
# Value for role.
ADMIN_ROLE = 0
MAINTAINER_ROLE = 5
USER_ROLE = 10
ROLES = {
'admin': ADMIN_ROLE,
'maintainer': MAINTAINER_ROLE,
'user': USER_ROLE,
}
# Value for mfa field
DISABLED_MFA = 0
ENABLED_MFA = 1
# Regex pattern to be used for validation.
PATTERN_EMAIL = r"[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,4}$"
PATTERN_FULLNAME = r"""[^!"#$%&()*+,./:;<=>?@[\]_{|}~]+$"""
PATTERN_USERNAME = r"[a-zA-Z0-9_.\-]+$"
userid = Column('UserID', Integer, primary_key=True)
_username = Column('Username', String, nullable=False, unique=True)
hash_password = Column('Password', String, nullable=False, default="")
_user_root = Column('UserRoot', String, nullable=False, default="")
_is_admin = deferred(
Column(
'IsAdmin',
SmallInteger,
nullable=False,
server_default="0",
doc="DEPRECATED This column is replaced by 'role'",
)
)
_email = Column('UserEmail', String, nullable=False, default="")
restore_format = deferred(
Column(
'RestoreFormat',
SmallInteger,
nullable=False,
server_default="1",
doc="DEPRECATED This column is not used anymore",
)
)
_role = Column('role', SmallInteger, nullable=False, server_default=str(USER_ROLE))
fullname = Column('fullname', String, nullable=False, default="")
mfa = Column('mfa', SmallInteger, nullable=False, default=DISABLED_MFA)
repo_objs = relationship(
'RepoObject',
foreign_keys='UserObject.userid',
primaryjoin='UserObject.userid == RepoObject.userid',
uselist=True,
lazy=True,
order_by=lambda: RepoObject.repopath,
)
@classmethod
def get_user(cls, user):
"""Return a user object."""
return UserObject.query.filter(UserObject.username == user).first()
@classmethod
def create_admin_user(cls, default_username, default_password):
# Check if admin user exists. If not, created it.
userobj = UserObject.get_user(default_username)
if not userobj:
userobj = cls.add_user(default_username, role=UserObject.ADMIN_ROLE, user_root='/backups')
# Also make sure to update the password with latest value from config file.
if default_password and default_password.startswith('{SSHA}'):
userobj.hash_password = default_password
elif default_password:
userobj.hash_password = hash_password(default_password)
else:
userobj.hash_password = hash_password('admin123')
userobj.add()
@classmethod
def add_user(cls, username, password=None, **attrs):
"""
Used to add a new user with an optional password.
"""
assert password is None or isinstance(password, str)
# Check if user already exists.
if UserObject.get_user(username):
raise ValueError(_("User %s already exists." % (username,)))
# Find a database where to add the user
logger.info("adding new user [%s]", username)
userobj = UserObject(
username=username,
hash_password=hash_password(password) if password else '',
**attrs,
).add()
# Raise event
cherrypy.engine.publish('user_added', userobj)
# Return user object
return userobj
def add_authorizedkey(self, key, comment=None):
"""
Add the given key to the user: append it to the `authorized_keys`
file if it exists, otherwise store it in the database.
"""
# Parse and validate ssh key
assert key
key = authorizedkeys.check_publickey(key)
# Remove option, replace comments.
key = authorizedkeys.AuthorizedKey(
options=None, keytype=key.keytype, key=key.key, comment=comment or key.comment
)
# If a filename exists, use it by default.
filename = os.path.join(self.user_root, '.ssh', 'authorized_keys')
if os.path.isfile(filename):
with open(filename, mode="r+", encoding='utf-8') as fh:
if authorizedkeys.exists(fh, key):
raise DuplicateSSHKeyError(_("SSH key already exists"))
logger.info("add key [%s] to [%s] authorized_keys", key, self.username)
authorizedkeys.add(fh, key)
else:
# Also look in database.
logger.info("add key [%s] to [%s] database", key, self.username)
try:
SshKey(userid=self.userid, fingerprint=key.fingerprint, key=key.getvalue()).add()
except IntegrityError:
SshKey.session.rollback()
raise DuplicateSSHKeyError(
_("Duplicate key. This key already exists or is associated to another user.")
)
cherrypy.engine.publish('user_attr_changed', self, {'authorizedkeys': True})
def add_access_token(self, name, expiration_time=None, length=16):
"""
Create a new access token. Return the un-encrypted value of the token.
"""
assert name
assert length >= 8
# Generate a random token
token = ''.join(secrets.choice(string.ascii_lowercase) for i in range(length))
# Store hash token
try:
obj = Token(userid=self.userid, name=name, hash_token=hash_password(token), expiration_time=expiration_time)
obj.add()
except IntegrityError:
Token.session.rollback()
raise ValueError(_("Duplicate token name: %s") % name)
cherrypy.engine.publish('access_token_added', self, name)
return token
def valid_user_root(self):
"""
Check if the current user_root is valid and readable
"""
try:
return os.access(self.user_root, os.F_OK) and os.path.isdir(self.user_root)
except Exception:
return False
def delete(self, *args, **kwargs):
cfg = cherrypy.tree.apps[''].cfg
if self.username == cfg.admin_user:
raise ValueError(_("can't delete admin user"))
# FIXME This should be deleted by cascade
SshKey.query.filter(SshKey.userid == self.userid).delete()
RepoObject.query.filter(RepoObject.userid == self.userid).delete()
Token.query.filter(Token.userid == self.userid).delete()
# Delete ourself
Base.delete(self)
def delete_authorizedkey(self, fingerprint):
"""
Remove the given key from the user: from the `authorized_keys`
file if it exists, otherwise from the database.
"""
# If a filename exists, use it by default.
filename = os.path.join(self.user_root, '.ssh', 'authorized_keys')
if os.path.isfile(filename):
with open(filename, mode='r+', encoding='utf-8') as fh:
logger.info("removing key [%s] from [%s] authorized_keys", fingerprint, self.username)
authorizedkeys.remove(fh, fingerprint)
else:
# Also look in database.
logger.info("removing key [%s] from [%s] database", fingerprint, self.username)
SshKey.query.filter(and_(SshKey.userid == self.userid, SshKey.fingerprint == fingerprint)).delete()
cherrypy.engine.publish('user_attr_changed', self, {'authorizedkeys': True})
def delete_access_token(self, name):
assert name
if not Token.query.filter(Token.userid == self.userid, Token.name == name).delete():
raise ValueError(_("token name doesn't exists: %s") % name)
@property
def disk_usage(self):
# Skip if user_root is invalid.
if not self.user_root or not os.path.exists(self.user_root):
return 0
values = cherrypy.engine.publish('get_disk_usage', self)
# Return the first not None value
return next((v for v in values if v is not None), 0)
@property
def disk_quota(self):
# Skip if user_root is invalid.
if not self.user_root or not os.path.exists(self.user_root):
return 0
values = cherrypy.engine.publish('get_disk_quota', self)
# Return the first not None value
return next((v for v in values if v is not None), 0)
@disk_quota.setter
def disk_quota(self, value):
# Skip if user_root is invalid.
if not self.user_root or not os.path.exists(self.user_root):
return
cherrypy.engine.publish('set_disk_quota', self, value)
@property
def authorizedkeys(self):
"""
Return an iterator over the authorized keys, read from the
`authorized_keys` file if it exists and from the database.
"""
# If a filename exists, use it by default.
filename = os.path.join(self.user_root, '.ssh', 'authorized_keys')
if os.path.isfile(filename):
for k in authorizedkeys.read(filename):
yield k
# Also look in database.
for record in SshKey.query.filter(SshKey.userid == self.userid).all():
yield authorizedkeys.check_publickey(record.key)
def refresh_repos(self, delete=False):
"""
Return a list of repository objects reflecting the filesystem folders.
Return a RepoObject for each sub-directory under `user_root` containing `rdiff-backup-data`.
"""
# Update the repositories by walking in the directory tree.
def _onerror(unused):
logger.error('error updating user [%s] repos' % self.username, exc_info=1)
# Get application config
cfg = cherrypy.tree.apps[''].cfg
dirty = False
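# Load existing repository records to compare against the folders found on disk.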
records = RepoObject.query.filter(RepoObject.userid == self.userid).order_by(RepoObject.repopath).all()
user_root = os.fsencode(self.user_root)
for root, dirs, unused_files in os.walk(user_root, _onerror):
for name in dirs.copy():
if name.startswith(b'.'):
dirs.remove(name)
if b'rdiff-backup-data' in dirs:
repopath = os.path.relpath(root, start=user_root)
del dirs[:]
# Handle special scenario when the repo is the
# user_root
repopath = b'' if repopath == b'.' else repopath
# Check if repo path exists.
record_match = next((record for record in records if record.repopath == os.fsdecode(repopath)), None)
if not record_match:
# Add repository to database.
RepoObject(user=self, repopath=os.fsdecode(repopath)).add()
dirty = True
else:
records.remove(record_match)
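# Stop descending once the configured maximum folder depth is reached.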
if root.count(SEP) - user_root.count(SEP) >= cfg.max_depth:
del dirs[:]
# If enabled, remove stale entries from the database
if delete:
for record in records:
RepoObject.query.filter(RepoObject.repoid == record.repoid).delete()
return dirty
@hybrid_property
def is_admin(self):
return self.role <= self.ADMIN_ROLE
@hybrid_property
def is_ldap(self):
return self.hash_password is None or self.hash_password == ''
@is_ldap.expression
def is_ldap(cls):
return or_(cls.hash_password.is_(None), cls.hash_password == '')
@hybrid_property
def is_maintainer(self):
return self.role <= self.MAINTAINER_ROLE
def set_password(self, password):
"""
Change the user's password. Raise a ValueError if the username or
the password are invalid.
"""
assert isinstance(password, str)
if not password:
raise ValueError("password can't be empty")
cfg = cherrypy.tree.apps[''].cfg
# Cannot update admin-password if defined
if self.username == cfg.admin_user and cfg.admin_password:
raise ValueError(_("can't update admin-password defined in configuration file"))
# Check password length
if cfg.password_min_length > len(password) or len(password) > cfg.password_max_length:
raise ValueError(
_('Password must have between %(min)d and %(max)d characters.')
% {'min': cfg.password_min_length, 'max': cfg.password_max_length}
)
# Verify password score using zxcvbn
stats = zxcvbn(password)
if stats.get('score') < cfg.password_score:
msg = _('Password too weak.')
warning = stats.get('feedback', {}).get('warning')
suggestions = stats.get('feedback', {}).get('suggestions')
if warning:
msg += ' ' + warning
if suggestions:
msg += ' ' + ' '.join(suggestions)
raise ValueError(msg)
logger.info("updating user password [%s]", self.username)
self.hash_password = hash_password(password)
def __eq__(self, other):
return type(self) == type(other) and inspect(self).key == inspect(other).key
@hybrid_property
def username(self):
return self._username
@username.setter
def username(self, value):
oldvalue = self._username
self._username = value
if oldvalue != value:
cherrypy.engine.publish('user_attr_changed', self, {'username': (oldvalue, value)})
@hybrid_property
def role(self):
if self._role is None:
return self.USER_ROLE
return self._role
@role.setter
def role(self, value):
oldvalue = self._role
self._role = value
if oldvalue != value:
cherrypy.engine.publish('user_attr_changed', self, {'role': (oldvalue, value)})
@hybrid_property
def email(self):
return self._email
@email.setter
def email(self, value):
oldvalue = self._email
self._email = value
if oldvalue != value:
cherrypy.engine.publish('user_attr_changed', self, {'email': (oldvalue, value)})
@hybrid_property
def user_root(self):
return self._user_root
@user_root.setter
def user_root(self, value):
oldvalue = self._user_root
self._user_root = value
if oldvalue != value:
cherrypy.engine.publish('user_attr_changed', self, {'user_root': (oldvalue, value)})
def validate_access_token(self, token):
"""
Check if the given token matches.
"""
for access_token in Token.query.all():
# If token expired. Let delete it.
if access_token.is_expired:
access_token.delete()
continue
if check_password(token, access_token.hash_token):
# When it matches, let update the record.
access_token.access_time = datetime.datetime.utcnow()
return True
return False
def validate_password(self, password):
return check_password(password, self.hash_password)
@event.listens_for(UserObject.hash_password, "set")
def hash_password_set(target, value, oldvalue, initiator):
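# Publish an event only when a non-empty password hash actually changes.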
if value and value != oldvalue:
cherrypy.engine.publish('user_password_changed', target)
@event.listens_for(UserObject, 'after_delete')
def user_after_delete(mapper, connection, target):
"""
Publish event when user is deleted.
"""
cherrypy.engine.publish('user_deleted', target.username)
|
PYSEC-2022-42978
|
rdiffweb/core/model/tests/test_user.py
|
@@ -36,11 +36,6 @@
class UserObjectTest(rdiffweb.test.WebCase):
-
- default_config = {
- 'email-send-changed-notification': True,
- }
-
def _read_ssh_key(self):
"""Readthe pub key from test packages"""
filename = pkg_resources.resource_filename('rdiffweb.core.tests', 'test_publickey_ssh_rsa.pub')
@@ -174,12 +169,16 @@ def test_get_set(self):
user.refresh_repos()
self.listener.user_attr_changed.assert_called_with(user, {'user_root': ('', self.testcases)})
self.listener.user_attr_changed.reset_mock()
+ user = UserObject.get_user('larry')
user.role = UserObject.ADMIN_ROLE
+ user.add()
self.listener.user_attr_changed.assert_called_with(
user, {'role': (UserObject.USER_ROLE, UserObject.ADMIN_ROLE)}
)
self.listener.user_attr_changed.reset_mock()
+ user = UserObject.get_user('larry')
user.email = '[email protected]'
+ user.add()
self.listener.user_attr_changed.assert_called_with(user, {'email': ('', '[email protected]')})
self.listener.user_attr_changed.reset_mock()
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Created on June 30, 2022
Module to test `user` model.
@author: Patrik Dufresne <[email protected]>
"""
import datetime
import os
from io import StringIO, open
from unittest.mock import MagicMock
import cherrypy
import pkg_resources
import rdiffweb.test
from rdiffweb.core import authorizedkeys
from rdiffweb.core.model import DuplicateSSHKeyError, RepoObject, Token, UserObject
from rdiffweb.core.passwd import check_password
class UserObjectTest(rdiffweb.test.WebCase):
default_config = {
'email-send-changed-notification': True,
}
def _read_ssh_key(self):
"""Readthe pub key from test packages"""
filename = pkg_resources.resource_filename('rdiffweb.core.tests', 'test_publickey_ssh_rsa.pub')
with open(filename, 'r', encoding='utf8') as f:
return f.readline()
def _read_authorized_keys(self):
"""Read the content of test_authorized_keys"""
filename = pkg_resources.resource_filename('rdiffweb.core.tests', 'test_authorized_keys')
with open(filename, 'r', encoding='utf8') as f:
return f.read()
def setUp(self):
super().setUp()
self.listener = MagicMock()
cherrypy.engine.subscribe('access_token_added', self.listener.access_token_added, priority=50)
cherrypy.engine.subscribe('queue_mail', self.listener.queue_mail, priority=50)
cherrypy.engine.subscribe('user_added', self.listener.user_added, priority=50)
cherrypy.engine.subscribe('user_attr_changed', self.listener.user_attr_changed, priority=50)
cherrypy.engine.subscribe('user_deleted', self.listener.user_deleted, priority=50)
cherrypy.engine.subscribe('user_login', self.listener.user_login, priority=50)
cherrypy.engine.subscribe('user_password_changed', self.listener.user_password_changed, priority=50)
def tearDown(self):
cherrypy.engine.unsubscribe('access_token_added', self.listener.access_token_added)
cherrypy.engine.unsubscribe('queue_mail', self.listener.queue_mail)
cherrypy.engine.unsubscribe('user_added', self.listener.user_added)
cherrypy.engine.unsubscribe('user_attr_changed', self.listener.user_attr_changed)
cherrypy.engine.unsubscribe('user_deleted', self.listener.user_deleted)
cherrypy.engine.unsubscribe('user_login', self.listener.user_login)
cherrypy.engine.unsubscribe('user_password_changed', self.listener.user_password_changed)
return super().tearDown()
def test_add_user(self):
"""Add user to database."""
userobj = UserObject.add_user('joe')
self.assertIsNotNone(userobj)
self.assertIsNotNone(UserObject.get_user('joe'))
# Check if listener called
self.listener.user_added.assert_called_once_with(userobj)
def test_add_user_updated_by_listener(self):
"""Add user to database."""
# Given a listener with side effet
def change_user_obj(userobj):
userobj.user_root = '/new/value'
self.listener.user_added.side_effect = change_user_obj
# When adding user
userobj = UserObject.add_user('joe')
self.assertIsNotNone(userobj)
self.assertIsNotNone(UserObject.get_user('joe'))
# Then listener gets called
self.listener.user_added.assert_called_once_with(userobj)
# Then object was updated by listener
self.assertEqual('/new/value', userobj.user_root)
def test_add_user_with_duplicate(self):
"""Add user to database."""
UserObject.add_user('denise')
self.listener.user_added.reset_mock()
with self.assertRaises(ValueError):
UserObject.add_user('denise')
# Check if listener called
self.listener.user_added.assert_not_called()
def test_add_user_with_password(self):
"""Add user to database with password."""
userobj = UserObject.add_user('jo', 'password')
self.assertIsNotNone(UserObject.get_user('jo'))
# Check if listener called
self.listener.user_added.assert_called_once_with(userobj)
def test_delete_admin_user(self):
# Trying to delete admin user should raise an error.
userobj = UserObject.get_user('admin')
with self.assertRaises(ValueError):
userobj.delete()
def test_users(self):
# Check admin exists
self.assertEqual(1, UserObject.query.count())
# Create user.
UserObject.add_user('annik')
users = UserObject.query.all()
self.assertEqual(2, len(users))
self.assertEqual('annik', users[1].username)
# Then 2 user exists
self.assertEqual(2, UserObject.query.count())
def test_get_user(self):
# Create new user
user = UserObject.add_user('bernie', 'my-password')
user.user_root = self.testcases
user.role = UserObject.ADMIN_ROLE
user.email = '[email protected]'
user.refresh_repos()
self.assertEqual(['broker-repo', 'testcases'], sorted([r.name for r in user.repo_objs]))
user.repo_objs[0].maxage = -1
user.repo_objs[1].maxage = 3
# Get user record.
obj = UserObject.get_user('bernie')
self.assertIsNotNone(obj)
self.assertEqual('bernie', obj.username)
self.assertEqual('[email protected]', obj.email)
self.assertEqual(['broker-repo', 'testcases'], sorted([r.name for r in obj.repo_objs]))
self.assertEqual(self.testcases, obj.user_root)
self.assertEqual(True, obj.is_admin)
self.assertEqual(UserObject.ADMIN_ROLE, obj.role)
# Get repo object
self.assertEqual('broker-repo', obj.repo_objs[0].name)
self.assertEqual(-1, obj.repo_objs[0].maxage)
self.assertEqual('testcases', obj.repo_objs[1].name)
self.assertEqual(3, obj.repo_objs[1].maxage)
def test_get_user_with_invalid_user(self):
self.assertIsNone(UserObject.get_user('invalid'))
def test_get_set(self):
user = UserObject.add_user('larry', 'password')
self.assertEqual('', user.email)
self.assertEqual([], user.repo_objs)
self.assertEqual('', user.user_root)
self.assertEqual(False, user.is_admin)
self.assertEqual(UserObject.USER_ROLE, user.role)
user.user_root = self.testcases
user.refresh_repos()
self.listener.user_attr_changed.assert_called_with(user, {'user_root': ('', self.testcases)})
self.listener.user_attr_changed.reset_mock()
user.role = UserObject.ADMIN_ROLE
self.listener.user_attr_changed.assert_called_with(
user, {'role': (UserObject.USER_ROLE, UserObject.ADMIN_ROLE)}
)
self.listener.user_attr_changed.reset_mock()
user.email = '[email protected]'
self.listener.user_attr_changed.assert_called_with(user, {'email': ('', '[email protected]')})
self.listener.user_attr_changed.reset_mock()
self.assertEqual('[email protected]', user.email)
self.assertEqual(['broker-repo', 'testcases'], sorted([r.name for r in user.repo_objs]))
self.assertEqual(self.testcases, user.user_root)
self.assertEqual(True, user.is_admin)
self.assertEqual(UserObject.ADMIN_ROLE, user.role)
def test_set_password_update(self):
# Given a user in database with a password
userobj = UserObject.add_user('annik', 'password')
self.listener.user_password_changed.reset_mock()
# When updating the user's password
userobj.set_password('new_password')
# Then password is SSHA
self.assertTrue(check_password('new_password', userobj.hash_password))
# Check if listener called
self.listener.user_password_changed.assert_called_once_with(userobj)
def test_delete_user(self):
# Given an existing user in database
userobj = UserObject.add_user('vicky')
self.assertIsNotNone(UserObject.get_user('vicky'))
# When deleting that user
userobj.delete()
# Then user is no longer in the database
self.assertIsNone(UserObject.get_user('vicky'))
# Then listener was called
self.listener.user_deleted.assert_called_once_with('vicky')
def test_set_password_empty(self):
"""Expect error when trying to update password of invalid user."""
userobj = UserObject.add_user('john')
with self.assertRaises(ValueError):
self.assertFalse(userobj.set_password(''))
def test_disk_quota(self):
"""
Just make a call to the function.
"""
userobj = UserObject.get_user(self.USERNAME)
userobj.disk_quota
def test_disk_usage(self):
"""
Just make a call to the function.
"""
userobj = UserObject.get_user(self.USERNAME)
disk_usage = userobj.disk_usage
self.assertIsInstance(disk_usage, int)
def test_add_authorizedkey_without_file(self):
"""
Add an ssh key for a user without an authorizedkey file.
"""
# Read the pub key
key = self._read_ssh_key()
# Add the key to the user
userobj = UserObject.get_user(self.USERNAME)
userobj.add_authorizedkey(key)
# validate
keys = list(userobj.authorizedkeys)
self.assertEqual(1, len(keys), "expecting one key")
self.assertEqual("3c:99:ed:a7:82:a8:71:09:2c:15:3d:78:4a:8c:11:99", keys[0].fingerprint)
def test_add_authorizedkey_duplicate(self):
# Read the pub key
key = self._read_ssh_key()
# Add the key to the user
userobj = UserObject.get_user(self.USERNAME)
userobj.add_authorizedkey(key)
# Add the same key
with self.assertRaises(DuplicateSSHKeyError):
userobj.add_authorizedkey(key)
def test_add_authorizedkey_with_file(self):
"""
Add an ssh key for a user with an authorizedkey file.
"""
userobj = UserObject.get_user(self.USERNAME)
# Create empty authorized_keys file
os.mkdir(os.path.join(userobj.user_root, '.ssh'))
filename = os.path.join(userobj.user_root, '.ssh', 'authorized_keys')
open(filename, 'a').close()
# Read the pub key
key = self._read_ssh_key()
userobj.add_authorizedkey(key)
# Validate
with open(filename, 'r') as fh:
self.assertEqual(key, fh.read())
def test_delete_authorizedkey_without_file(self):
"""
Remove an ssh key for a user without authorizedkey file.
"""
# Update user with ssh keys.
data = self._read_authorized_keys()
userobj = UserObject.get_user(self.USERNAME)
for k in authorizedkeys.read(StringIO(data)):
try:
userobj.add_authorizedkey(k.getvalue())
except ValueError:
# Some ssh keys in the testing file are not valid.
pass
# Get the keys
keys = list(userobj.authorizedkeys)
self.assertEqual(2, len(keys))
# Remove a key
userobj.delete_authorizedkey("9a:f1:69:3c:bc:5a:cd:02:5e:33:bc:cd:c0:01:eb:4c")
# Validate
keys = list(userobj.authorizedkeys)
self.assertEqual(1, len(keys))
def test_delete_authorizedkey_with_file(self):
"""
Remove an ssh key for a user with authorizedkey file.
"""
# Create authorized_keys file
data = self._read_authorized_keys()
userobj = UserObject.get_user(self.USERNAME)
os.mkdir(os.path.join(userobj.user_root, '.ssh'))
filename = os.path.join(userobj.user_root, '.ssh', 'authorized_keys')
with open(filename, 'w') as f:
f.write(data)
# Get the keys
keys = list(userobj.authorizedkeys)
self.assertEqual(5, len(keys))
# Remove a key
userobj.delete_authorizedkey("9a:f1:69:3c:bc:5a:cd:02:5e:33:bc:cd:c0:01:eb:4c")
# Validate
keys = list(userobj.authorizedkeys)
self.assertEqual(4, len(keys))
def test_repo_objs(self):
# Given a user with a list of repositories
userobj = UserObject.get_user(self.USERNAME)
repos = sorted(userobj.repo_objs, key=lambda r: r.name)
self.assertEqual(['broker-repo', 'testcases'], [r.name for r in repos])
# When deleting a repository
repos[1].delete()
# Then the repository is removed from the list.
self.assertEqual(['broker-repo'], sorted([r.name for r in userobj.repo_objs]))
def test_refresh_repos_without_delete(self):
# Given a user with invalid repositories
userobj = UserObject.get_user(self.USERNAME)
RepoObject.query.delete()
RepoObject(userid=userobj.userid, repopath='invalid').add()
self.assertEqual(['invalid'], sorted([r.name for r in userobj.repo_objs]))
# When updating the repository list without deletion
userobj.refresh_repos()
# Then the list includes the invalid repo and the new repos
self.assertEqual(['broker-repo', 'invalid', 'testcases'], sorted([r.name for r in userobj.repo_objs]))
def test_refresh_repos_with_delete(self):
# Given a user with invalid repositories
userobj = UserObject.get_user(self.USERNAME)
RepoObject.query.delete()
RepoObject(userid=userobj.userid, repopath='invalid').add()
self.assertEqual(['invalid'], sorted([r.name for r in userobj.repo_objs]))
# When updating the repository list with deletion
userobj.refresh_repos(delete=True)
# Then the invalid repo is removed and the new repos are added
userobj.expire()
self.assertEqual(['broker-repo', 'testcases'], sorted([r.name for r in userobj.repo_objs]))
def test_refresh_repos_with_single_repo(self):
# Given a user with invalid repositories
userobj = UserObject.get_user(self.USERNAME)
userobj.user_root = os.path.join(self.testcases, 'testcases')
# When updating the repository list with deletion
userobj.refresh_repos(delete=True)
# Then the list contains a single repository with an empty name
userobj.expire()
self.assertEqual([''], sorted([r.name for r in userobj.repo_objs]))
def test_add_access_token(self):
# Given a user with an email
userobj = UserObject.get_user(self.USERNAME)
userobj.email = '[email protected]'
userobj.add()
# When adding a new token
token = userobj.add_access_token('test')
# Then a new token get created
self.assertTrue(token)
tokenobj = Token.query.filter(Token.userid == userobj.userid).first()
self.assertTrue(tokenobj)
self.assertEqual(None, tokenobj.expiration_time)
self.assertEqual(None, tokenobj.access_time)
# Then an email is sent to the user.
self.listener.access_token_added.assert_called_once_with(userobj, 'test')
self.listener.queue_mail.assert_called_once()
def test_add_access_token_duplicate_name(self):
# Given a user with an existing token
userobj = UserObject.get_user(self.USERNAME)
userobj.add_access_token('test')
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
# When adding a new token with the same name
with self.assertRaises(ValueError):
userobj.add_access_token('test')
# Then token is not created
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
# Then an email is not sent.
self.listener.access_token_added.assert_called_once_with(userobj, 'test')
def test_delete_access_token(self):
# Given a user with an existing token
userobj = UserObject.get_user(self.USERNAME)
userobj.add_access_token('test')
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
# When deleting an access token
userobj.delete_access_token('test')
# Then Token get deleted
self.assertEqual(0, Token.query.filter(Token.userid == userobj.userid).count())
def test_delete_access_token_invalid(self):
# Given a user with an existing token
userobj = UserObject.get_user(self.USERNAME)
userobj.add_access_token('test')
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
# When deleting an invalid access token
with self.assertRaises(ValueError):
userobj.delete_access_token('invalid')
# Then Token not deleted
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
def test_delete_user_remove_access_tokens(self):
# Given a user with an existing token
userobj = UserObject.add_user('testuser', 'password')
userobj.add_access_token('test')
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
# When deleting the user
userobj.delete()
# Then Token get deleted
self.assertEqual(0, Token.query.filter(Token.userid == userobj.userid).count())
def test_verify_access_token(self):
# Given a user with an existing token
userobj = UserObject.get_user(self.USERNAME)
token = userobj.add_access_token('test')
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
# When validating the token
# Then token is valid
self.assertTrue(userobj.validate_access_token(token))
def test_verify_access_token_with_expired(self):
# Given a user with an existing token
userobj = UserObject.get_user(self.USERNAME)
token = userobj.add_access_token(
'test', expiration_time=datetime.datetime.now() - datetime.timedelta(seconds=1)
)
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
# When validating the token
# Then token is invalid
self.assertFalse(userobj.validate_access_token(token))
# Then token get removed
self.assertEqual(0, Token.query.filter(Token.userid == userobj.userid).count())
def test_verify_access_token_with_invalid(self):
# Given a user with an existing token
userobj = UserObject.get_user(self.USERNAME)
userobj.add_access_token('test', expiration_time=datetime.datetime.now())
self.assertEqual(1, Token.query.filter(Token.userid == userobj.userid).count())
# When validating the token
# Then token is invalid
self.assertFalse(userobj.validate_access_token('invalid'))
class UserObjectWithAdminPassword(rdiffweb.test.WebCase):
# password: test
default_config = {'admin-password': '{SSHA}wbSK4hlEX7mtGJplFi2oN6ABm6Y3Bo1e'}
def setUp(self):
# Do nothing - We need to skip the default setup to avoid deleting the records.
pass
def test_create_admin_user(self):
# Given admin-password is configured
# When the database gets created
# Then the admin user gets created with the 'test' password
userobj = UserObject.get_user(self.USERNAME)
self.assertIsNotNone(userobj)
self.assertEqual('{SSHA}wbSK4hlEX7mtGJplFi2oN6ABm6Y3Bo1e', userobj.hash_password)
self.assertTrue(check_password('test', userobj.hash_password))
def test_set_password(self):
# Given admin-password is configured
# When trying to update admin password
# Then an exception is raised
userobj = UserObject.get_user(self.USERNAME)
with self.assertRaises(ValueError):
userobj.set_password('newpassword')
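# Illustrative sketch (not rdiffweb's actual implementation): the tests above
# compare passwords against '{SSHA}...' hashes via check_password(). A typical
# SSHA check decodes the base64 payload, splits it into a 20-byte SHA-1 digest
# plus salt, and recomputes the digest from the candidate password. The helper
# names below are hypothetical.
import base64
import hashlib
import os
def ssha_hash(password, salt=None):
    """Build an '{SSHA}' hash from a password and an optional salt."""
    salt = salt if salt is not None else os.urandom(4)
    digest = hashlib.sha1(password.encode('utf-8') + salt).digest()
    return '{SSHA}' + base64.b64encode(digest + salt).decode('ascii')
def ssha_check(password, hashed):
    """Return True if the password matches an '{SSHA}' hash."""
    if not hashed.startswith('{SSHA}'):
        return False
    raw = base64.b64decode(hashed[len('{SSHA}'):])
    digest, salt = raw[:20], raw[20:]
    return hashlib.sha1(password.encode('utf-8') + salt).digest() == digest
assert ssha_check('test', ssha_hash('test'))
assert not ssha_check('wrong', ssha_hash('test'))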
|
PYSEC-2022-42978
|
rdiffweb/core/notification.py
|
@@ -78,19 +78,31 @@ def user_attr_changed(self, userobj, attrs={}):
return
# Leave if the mail was not changed.
- if 'email' not in attrs:
- return
-
- old_email = attrs['email'][0]
- if not old_email:
- logger.info("can't sent mail to user [%s] without an email", userobj.username)
- return
-
- # If the email attributes was changed, send a mail notification.
- body = self.app.templates.compile_template(
- "email_changed.html", **{"header_name": self.app.cfg.header_name, 'user': userobj}
- )
- self.bus.publish('queue_mail', to=old_email, subject=_("Email address changed"), message=body)
+ if 'email' in attrs:
+ old_email = attrs['email'][0]
+ if not old_email:
+ logger.info("can't sent mail to user [%s] without an email", userobj.username)
+ return
+ # If the email attributes was changed, send a mail notification.
+ subject = _("Email address changed")
+ body = self.app.templates.compile_template(
+ "email_changed.html", **{"header_name": self.app.cfg.header_name, 'user': userobj}
+ )
+ self.bus.publish('queue_mail', to=old_email, subject=str(subject), message=body)
+
+ if 'mfa' in attrs:
+ if not userobj.email:
+ logger.info("can't sent mail to user [%s] without an email", userobj.username)
+ return
+ subject = (
+ _("Two-Factor Authentication turned off")
+ if userobj.mfa == UserObject.DISABLED_MFA
+ else _("Two-Factor Authentication turned on")
+ )
+ body = self.app.templates.compile_template(
+ "email_mfa.html", **{"header_name": self.app.cfg.header_name, 'user': userobj}
+ )
+ self.bus.publish('queue_mail', to=userobj.email, subject=str(subject), message=body)
def user_password_changed(self, userobj):
if not self.send_changed:
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Plugin used to send email to users when their repository is getting too old.
User can control the notification period.
"""
import datetime
import logging
import cherrypy
from cherrypy.process.plugins import SimplePlugin
from rdiffweb.core import librdiff
from rdiffweb.core.model import UserObject
from rdiffweb.tools.i18n import ugettext as _
logger = logging.getLogger(__name__)
class NotificationPlugin(SimplePlugin):
"""
Send email notification when a repository get too old (without a backup).
"""
execution_time = '23:00'
send_changed = False
def start(self):
self.bus.log('Start Notification plugin')
self.bus.publish('schedule_job', self.execution_time, self.notification_job)
self.bus.subscribe('access_token_added', self.access_token_added)
self.bus.subscribe('user_attr_changed', self.user_attr_changed)
self.bus.subscribe('user_password_changed', self.user_password_changed)
def stop(self):
self.bus.log('Stop Notification plugin')
self.bus.publish('unschedule_job', self.notification_job)
self.bus.unsubscribe('access_token_added', self.access_token_added)
self.bus.unsubscribe('user_attr_changed', self.user_attr_changed)
self.bus.unsubscribe('user_password_changed', self.user_password_changed)
@property
def app(self):
return cherrypy.tree.apps['']
def access_token_added(self, userobj, name):
if not self.send_changed:
return
if not userobj.email:
logger.info("can't sent mail to user [%s] without an email", userobj.username)
return
# Send a mail notification
body = self.app.templates.compile_template(
"access_token_added.html", **{"header_name": self.app.cfg.header_name, 'user': userobj, 'name': name}
)
self.bus.publish('queue_mail', to=userobj.email, subject=_("A new access token has been created"), message=body)
def user_attr_changed(self, userobj, attrs={}):
if not self.send_changed:
return
# Leave if the mail was not changed.
if 'email' not in attrs:
return
old_email = attrs['email'][0]
if not old_email:
logger.info("can't sent mail to user [%s] without an email", userobj.username)
return
# If the email attributes was changed, send a mail notification.
body = self.app.templates.compile_template(
"email_changed.html", **{"header_name": self.app.cfg.header_name, 'user': userobj}
)
self.bus.publish('queue_mail', to=old_email, subject=_("Email address changed"), message=body)
def user_password_changed(self, userobj):
if not self.send_changed:
return
if not userobj.email:
logger.info("can't sent mail to user [%s] without an email", userobj.username)
return
# If the password was changed, send a mail notification.
body = self.app.templates.compile_template(
"password_changed.html", **{"header_name": self.app.cfg.header_name, 'user': userobj}
)
self.bus.publish('queue_mail', to=userobj.email, subject=_("Password changed"), message=body)
def notification_job(self):
"""
Loop through all the user repositories and send notifications.
"""
now = librdiff.RdiffTime()
def _user_repos():
"""Return a generator trought user repos to be notified."""
for user in UserObject.query.all():
# Check if user has email.
if not user.email:
continue
# Identify old repo for current user.
old_repos = []
for repo in user.repo_objs:
# Check if repo has age configured (in days)
maxage = repo.maxage
if not maxage or maxage <= 0:
continue
# Check repo age.
if repo.last_backup_date is None or repo.last_backup_date < (now - datetime.timedelta(days=maxage)):
old_repos.append(repo)
# Return an item only if user had old repo
if old_repos:
yield user, old_repos
# For each candidate, send mail.
for user, repos in _user_repos():
parms = {'user': user, 'repos': repos}
body = self.app.templates.compile_template("email_notification.html", **parms)
cherrypy.engine.publish('queue_mail', to=user.email, subject=_("Notification"), message=body)
cherrypy.notification = NotificationPlugin(cherrypy.engine)
cherrypy.notification.subscribe()
cherrypy.config.namespaces['notification'] = lambda key, value: setattr(cherrypy.notification, key, value)
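# Illustrative sketch of the age test used by notification_job above, written
# with plain datetime objects instead of librdiff.RdiffTime (assumption: RdiffTime
# supports the same comparison and subtraction semantics). A repo is considered
# "old" when its last backup is missing or older than `maxage` days.
import datetime as _dt
def is_repo_old(last_backup_date, maxage, now=None):
    if not maxage or maxage <= 0:
        return False  # notifications disabled for this repo
    now = now or _dt.datetime.now()
    cutoff = now - _dt.timedelta(days=maxage)
    return last_backup_date is None or last_backup_date < cutoff
assert is_repo_old(None, 1)
assert is_repo_old(_dt.datetime(2020, 1, 1), 7)
assert not is_repo_old(_dt.datetime.now(), 7)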
|
PYSEC-2022-42978
|
rdiffweb/core/tests/test_notification.py
|
@@ -118,10 +118,13 @@ def test_email_changed(self):
# Given a user with an email address
user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
+ user.add()
self.listener.queue_email.reset_mock()
# When updating the user's email
+ user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
+ user.add()
# Then a email is queue to notify the user.
self.listener.queue_email.assert_called_once_with(
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Feb 13, 2016
@author: Patrik Dufresne <[email protected]>
"""
from unittest.mock import MagicMock
import cherrypy
import rdiffweb.core.notification
import rdiffweb.test
from rdiffweb.core.model import RepoObject, UserObject
class NotificationJobTest(rdiffweb.test.WebCase):
def setUp(self):
self.listener = MagicMock()
cherrypy.engine.subscribe('queue_mail', self.listener.queue_email, priority=50)
return super().setUp()
def tearDown(self):
cherrypy.engine.unsubscribe('queue_mail', self.listener.queue_email)
return super().tearDown()
def test_notification_job(self):
"""
Run the notification and check if mails are sent
"""
# Given a user with an email address and a repository with a maxage
# Set user config
user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
user.add()
repo = RepoObject.query.filter(RepoObject.user == user, RepoObject.repopath == self.REPO).first()
repo.maxage = 1
repo.add()
# When running notification_job
cherrypy.notification.notification_job()
# Then an email is queued for this user
self.listener.queue_email.assert_called_once_with(
to='[email protected]',
subject='Notification',
message="<html>\n <head></head>\n <body>\n Hey admin,\n <p>\n You are receiving this email to notify you about your backups. The\n following repositories are inactive for some time. We invite you to have a look\n at your last backup schedule.\n </p>\n <ul>\n <li>testcases</li>\n </ul>\n <p>\n If you don't want to be notify about this. You need to review your\n user preferences.\n </p>\n </body>\n</html>",
)
def test_notification_job_undefined_last_backup_date(self):
# Given a valid user with a repository configured for notification
user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
user.add()
# Given a repo with last_backup_date None
repo = RepoObject.query.filter(RepoObject.user == user, RepoObject.repopath == 'broker-repo').first()
repo.maxage = 1
repo.add()
self.assertIsNone(repo.last_backup_date)
# When Notification job is running
cherrypy.notification.notification_job()
# Then a notification is sent to the user.
self.listener.queue_email.assert_called_once_with(
to='[email protected]',
subject='Notification',
message="<html>\n <head></head>\n <body>\n Hey admin,\n <p>\n You are receiving this email to notify you about your backups. The\n following repositories are inactive for some time. We invite you to have a look\n at your last backup schedule.\n </p>\n <ul>\n <li>broker-repo</li>\n </ul>\n <p>\n If you don't want to be notify about this. You need to review your\n user preferences.\n </p>\n </body>\n</html>",
)
def test_notification_job_without_notification(self):
# Given a valid user with a repository configured without notification (-1)
user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
user.add()
repo = RepoObject.query.filter(RepoObject.user == user, RepoObject.repopath == self.REPO).first()
repo.maxage = -1
repo.add()
# Call notification.
cherrypy.notification.notification_job()
# Expect it not to be called.
self.listener.queue_email.assert_not_called()
class NotificationPluginTest(rdiffweb.test.WebCase):
default_config = {
'email-send-changed-notification': True,
}
def setUp(self):
self.listener = MagicMock()
cherrypy.engine.subscribe('queue_mail', self.listener.queue_email, priority=50)
return super().setUp()
def tearDown(self):
cherrypy.engine.unsubscribe('queue_mail', self.listener.queue_email)
return super().tearDown()
def test_email_changed(self):
# Given a user with an email address
user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
self.listener.queue_email.reset_mock()
# When updating the user's email
user.email = '[email protected]'
# Then a email is queue to notify the user.
self.listener.queue_email.assert_called_once_with(
to='[email protected]',
subject='Email address changed',
message='<html>\n <head></head>\n <body>\n Hey admin,\n <p>You recently changed the email address associated with your Rdiffweb account.</p>\n <p>\n If you did not make this change and believe your account has been compromised, please contact your administrator.\n </p>\n </body>\n</html>',
)
def test_email_updated_with_same_value(self):
# Given a user with an email
user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
self.listener.queue_email.reset_mock()
# When updating the user's email with the same value
user.email = '[email protected]'
# Then no email is sent to the user
self.listener.queue_email.assert_not_called()
def test_password_change_notification(self):
# Given a user with a email.
user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
self.listener.queue_email.reset_mock()
# When updating the user password
user.set_password('new_password')
# Then an email is sent to the user
self.listener.queue_email.assert_called_once_with(
to='[email protected]',
subject='Password changed',
message='<html>\n <head></head>\n <body>\n Hey admin,\n <p>You recently changed the password associated with your Rdiffweb account.</p>\n <p>\n If you did not make this change and believe your account has been compromised, please contact your administrator.\n </p>\n </body>\n</html>',
)
def test_password_change_with_same_value(self):
# Given a user with a email.
user = UserObject.get_user(self.USERNAME)
user.email = '[email protected]'
user.set_password('new_password')
self.listener.queue_email.reset_mock()
# When updating the user password with the same value
user.set_password('new_password')
# Then an email is sent to the user
self.listener.queue_email.assert_called_once_with(
to='[email protected]',
subject='Password changed',
message='<html>\n <head></head>\n <body>\n Hey admin,\n <p>You recently changed the password associated with your Rdiffweb account.</p>\n <p>\n If you did not make this change and believe your account has been compromised, please contact your administrator.\n </p>\n </body>\n</html>',
)
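# Minimal sketch of the publish/subscribe pattern these tests rely on, assuming
# cherrypy is installed; the 'queue_mail' channel name mirrors the tests above.
# A MagicMock listener records every message published on the bus channel.
from unittest.mock import MagicMock
import cherrypy
_listener = MagicMock()
cherrypy.engine.subscribe('queue_mail', _listener.queue_email, priority=50)
try:
    cherrypy.engine.publish('queue_mail', to='[email protected]',
                            subject='Notification', message='<html>...</html>')
    _listener.queue_email.assert_called_once_with(
        to='[email protected]', subject='Notification', message='<html>...</html>')
finally:
    cherrypy.engine.unsubscribe('queue_mail', _listener.queue_email)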
|
PYSEC-2022-42978
|
tensorflow/python/kernel_tests/array_ops/stack_op_test.py
|
@@ -16,12 +16,16 @@
import numpy as np
+from tensorflow.python import tf2
from tensorflow.python.eager import context
+from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
@@ -69,6 +73,19 @@ def testSimpleParallelCPU(self):
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
+ def testParallelConcatShapeZero(self):
+ if not tf2.enabled():
+ self.skipTest("only fails in TF2")
+
+ @def_function.function
+ def f():
+ y = gen_array_ops.parallel_concat(values=[["tf"]], shape=0)
+ return y
+
+ with self.assertRaisesRegex(errors.InvalidArgumentError,
+ r"0th dimension of value .* is less than"):
+ f()
+
def testSimpleParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Stack and ParallelStack Ops."""
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class StackOpTest(test.TestCase):
def randn(self, shape, dtype):
data = np.random.randn(*shape)
if dtype == np.bool_:
return data < 0 # Naive casting yields True with P(1)!
else:
return data.astype(dtype)
def testSimple(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
rank = len(shape)
for axis in range(-rank, rank):
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
data = self.randn(shape, dtype)
xs = np_split_squeeze(data, axis)
# Stack back into a single tensorflow tensor
with self.subTest(shape=shape, axis=axis, dtype=dtype):
c = array_ops.stack(xs, axis=axis)
self.assertAllEqual(c, data)
def testSimpleParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testSimpleParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testConst(self):
np.random.seed(7)
with test_util.use_gpu():
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
# Check on a variety of shapes and types
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
for dtype in [np.bool_, np.float32, np.int16, np.int32, np.int64]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
# Stack back into a single tensorflow tensor directly using np array
c = array_ops.stack(data)
if not context.executing_eagerly():
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c, data)
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
if not context.executing_eagerly():
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl, data)
def testConstParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testConstParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testGradientsAxis0(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
with self.subTest(shape=shape):
with self.cached_session():
def func(*xs):
return array_ops.stack(xs)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testGradientsAxis1(self):
np.random.seed(7)
for shape in (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.subTest(shape=shape):
with self.cached_session():
def func(*inp):
return array_ops.stack(inp, axis=1)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testZeroSizeCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testZeroSizeGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testAxis0DefaultCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAxis0DefaultGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10):
rank = len(shape)
expected = self.randn(shape, np.float32)
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
# For all the possible axis to split it, including negative indices.
for axis in range(-rank, rank):
test_arrays = np_split_squeeze(expected, axis)
with self.cached_session():
with self.subTest(shape=shape, dtype=dtype, axis=axis):
actual_pack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = self.evaluate(actual_pack)
actual_stack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = self.evaluate(actual_stack)
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = 2 not in range \[-2, 2\)"):
array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = -3 not in range \[-2, 2\)"):
array_ops.stack(t, axis=-3)
def testComplex(self):
np.random.seed(7)
with self.session():
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
for dtype in [np.complex64, np.complex128]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)
self.assertAllEqual(self.evaluate(c), data)
class AutomaticStackingTest(test.TestCase):
def testSimple(self):
self.assertAllEqual([1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
[0,
constant_op.constant(1), 0],
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
constant_op.constant([0, 1, 0]),
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([
constant_op.constant([0, 0, 0]),
constant_op.constant([0, 1, 0]),
constant_op.constant([0, 0, 0])
]))
def testWithNDArray(self):
with self.session():
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(
[[2., 2.], [3., 3.]],
dtype=np.float32)])
self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
self.evaluate(result))
def testDtype(self):
t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
self.assertEqual(dtypes.float64, t_1.dtype)
t_2 = ops.convert_to_tensor(
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
t_3 = ops.convert_to_tensor(
[[0., 0., 0.],
constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_3.dtype)
t_4 = ops.convert_to_tensor(
[constant_op.constant([0., 0., 0.], dtype=dtypes.float64)],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_4.dtype)
with self.assertRaises(TypeError):
ops.convert_to_tensor([
constant_op.constant(
[0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
])
def testDtypeConversionWhenTensorDtypeMismatch(self):
t_0 = ops.convert_to_tensor([0., 0., 0.])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([0, 0, 0])
self.assertEqual(dtypes.int32, t_1.dtype)
t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
if __name__ == "__main__":
test.main()
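# Standalone illustration of the round-trip these tests exercise: splitting an
# array along an axis with np_split_squeeze and stacking the pieces back on the
# same axis reproduces the original array (numpy-only sketch, no TensorFlow
# session required).
import numpy as np
def _np_split_squeeze(array, axis):
    axis_len = array.shape[axis]
    return [np.squeeze(arr, axis=(axis,))
            for arr in np.split(array, axis_len, axis=axis)]
_data = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
for _axis in range(_data.ndim):
    _pieces = _np_split_squeeze(_data, _axis)
    assert np.array_equal(np.stack(_pieces, axis=_axis), _data)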
|
PYSEC-2021-816
|
beaker/crypto/pycrypto.py
|
@@ -15,17 +15,18 @@ def aesEncrypt(data, key):
except ImportError:
from Crypto.Cipher import AES
+ from Crypto.Util import Counter
def aesEncrypt(data, key):
- cipher = AES.new(key)
+ cipher = AES.new(key, AES.MODE_CTR,
+ counter=Counter.new(128, initial_value=0))
- data = data + (" " * (16 - (len(data) % 16)))
return cipher.encrypt(data)
def aesDecrypt(data, key):
- cipher = AES.new(key)
-
- return cipher.decrypt(data).rstrip()
+ cipher = AES.new(key, AES.MODE_CTR,
+ counter=Counter.new(128, initial_value=0))
+ return cipher.decrypt(data)
def getKeyLength():
return 32
|
"""Encryption module that uses pycryptopp or pycrypto"""
try:
# Pycryptopp is preferred over Crypto because Crypto has had
# various periods of not being maintained, and pycryptopp uses
# the Crypto++ library which is generally considered the 'gold standard'
# of crypto implementations
from pycryptopp.cipher import aes
def aesEncrypt(data, key):
cipher = aes.AES(key)
return cipher.process(data)
# magic.
aesDecrypt = aesEncrypt
except ImportError:
from Crypto.Cipher import AES
def aesEncrypt(data, key):
cipher = AES.new(key)
data = data + (" " * (16 - (len(data) % 16)))
return cipher.encrypt(data)
def aesDecrypt(data, key):
cipher = AES.new(key)
return cipher.decrypt(data).rstrip()
def getKeyLength():
return 32
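# Sketch of the CTR-mode variant introduced by the fix above, using the
# PyCrypto/pycryptodome API named in the patch. A fresh cipher object is created
# per call because CTR mode keeps an internal counter position; CTR also removes
# the need for the space-padding used by the ECB version.
from Crypto.Cipher import AES
from Crypto.Util import Counter
def aes_encrypt_ctr(data, key):
    cipher = AES.new(key, AES.MODE_CTR,
                     counter=Counter.new(128, initial_value=0))
    return cipher.encrypt(data)
def aes_decrypt_ctr(data, key):
    cipher = AES.new(key, AES.MODE_CTR,
                     counter=Counter.new(128, initial_value=0))
    return cipher.decrypt(data)
_key = b'\x00' * 32  # getKeyLength() above returns 32 (AES-256)
assert aes_decrypt_ctr(aes_encrypt_ctr(b'session payload', _key), _key) == b'session payload'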
|
PYSEC-2012-1
|
bottle.py
|
@@ -16,7 +16,7 @@
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
-__version__ = '0.12.19'
+__version__ = '0.12.20'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2016, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.12.19'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings, hashlib
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
if py >= (3, 3, 0):
from collections.abc import MutableMapping as DictMixin
from types import ModuleType as new_module
else:
from collections import MutableMapping as DictMixin
from imp import new_module
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from imp import new_module
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try: functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, hard=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
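# Minimal usage sketch for cached_property above (names are illustrative only):
# the first attribute access runs the function and stores the result in the
# instance __dict__, so later accesses bypass the descriptor entirely.
class _CachedExample(object):
    calls = 0
    @cached_property
    def answer(self):
        self.calls += 1
        return 42
_ce = _CachedExample()
assert _ce.answer == 42 and _ce.answer == 42 and _ce.calls == 1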
class lazy_attribute(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
''' Add a new rule or replace the target for an existing rule. '''
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
''' Build an URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
target = None
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
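# Usage sketch for the Router above (illustrative only): add a dynamic rule and
# match it against a minimal WSGI-style environ. The second element of the
# result is the dict of wildcard values extracted from the path.
_router = Router()
_router.add('/wiki/<page>', 'GET', target='wiki_target')
_target, _args = _router.match({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/wiki/Home'})
assert _target == 'wiki_target' and _args == {'page': 'Home'}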
class Route(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turning a URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config, make_namespaces=True)
def __call__(self, *a, **ka):
depr("Some APIs changed to return Route() instances instead of"\
" callables. Make sure to use the Route.call method and not to"\
" call Route instances directly.") #0.12
return self.call(*a, **ka)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
@property
def _context(self):
depr('Switch to Plugin API v2 and access the Route object directly.') #0.12
return dict(rule=self.rule, method=self.method, callback=self.callback,
name=self.name, app=self.app, config=self.config,
apply=self.plugins, skip=self.skiplist)
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
api = getattr(plugin, 'api', 1)
context = self if api > 1 else self._context
callback = plugin.apply(callback, context)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
''' Return the callback. If the callback is a decorated function, try to
recover the original function. '''
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
''' Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. '''
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
''' Lookup a config field and return its value, first checking the
route.config, then route.app.config.'''
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
''' Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
'''
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
''' Remove a callback from a hook. '''
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
''' Trigger a hook and return a list of results. '''
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
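# Hedged usage sketch (not part of the original source): how the hook
# machinery above is typically wired up. The handler names are hypothetical.
#
#   app = Bottle()
#
#   @app.hook('before_request')
#   def log_request():
#       print('handling', request.path)
#
#   @app.hook('after_request')
#   def tag_response():
#       response.set_header('X-Served-By', 'bottle')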
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
depr('Parameter order of Bottle.mount() changed.', True) # 0.10
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
try:
_raise(*exc_info)
finally:
exc_info = None
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
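# Hedged mounting sketch (not part of the original source): a second
# application served under a URL prefix. `admin_app` is hypothetical.
#
#   root_app, admin_app = Bottle(), Bottle()
#
#   @admin_app.route('/status')
#   def status():
#       return {'ok': True}
#
#   root_app.mount('/admin/', admin_app)   # GET /admin/status -> status()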
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
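# Hedged routing sketch (not part of the original source): the decorator
# form and the callback= shortcut are equivalent. Handler names are made up.
#
#   app = Bottle()
#
#   @app.route('/hello/<name>')
#   def hello(name):
#       return 'Hello %s' % name
#
#   app.route('/ping', callback=lambda: 'pong')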
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
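# Hedged error-handler sketch (not part of the original source); the handler
# receives the HTTPError instance that triggered it.
#
#   @app.error(404)
#   def not_found(error):
#       return 'Nothing here: %s' % error.body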
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
''' Each instance of :class:'Bottle' is a WSGI application. '''
return self.wsgi(environ, start_response)
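# Hedged deployment sketch (not part of the original source): every Bottle
# instance is itself a WSGI callable, so it can be handed to any WSGI server.
# The host/port values are arbitrary examples.
#
#   from wsgiref.simple_server import make_server
#
#   app = Bottle()
#
#   @app.route('/')
#   def index():
#       return 'it works'
#
#   make_server('localhost', 8080, app).serve_forever()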
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
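# Hedged signed-cookie sketch (not part of the original source); the cookie
# name and SECRET value are hypothetical.
#
#   SECRET = 'change-me'
#   visits = request.get_cookie('visits', default='0', secret=SECRET)
#   response.set_cookie('visits', str(int(visits) + 1), secret=SECRET)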
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
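# Hedged JSON-body sketch (not part of the original source); route and field
# names are hypothetical.
#
#   @app.post('/items')
#   def create_item():
#       data = request.json or {}
#       return {'received': data.get('name')}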
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
def _iter_chunked(self, read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
body_iter = self._iter_chunked if self.chunked else self._iter_body
read_func = self.environ['wsgi.input'].read
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
''' Read the body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are too large. '''
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
''' True if Chunked transfer encoding is used. '''
return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
data = data.list or []
for item in data:
if item.filename is None:
post[item.name] = item.value
else:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and trailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
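# Hedged worked example (not part of the original source): with
# SCRIPT_NAME='/' and PATH_INFO='/api/v1/users', request.path_shift(2)
# leaves SCRIPT_NAME='/api/v1' and PATH_INFO='/users'; a negative shift
# moves segments back in the other direction.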
@property
def content_length(self):
''' The request body length as an integer. The client is responsible for
setting this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
''' The Content-Type header as a lowercase-string (default: empty). '''
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only works
if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(key):
if '\n' in key or '\r' in key or '\0' in key:
raise ValueError("Header names must not contain control characters: %r" % key)
return key.title().replace('_', '-')
def _hval(value):
value = tonat(value)
if '\n' in value or '\r' in value or '\0' in value:
raise ValueError("Header value must not contain control characters: %r" % value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=None, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.get_header(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj[self.name] = self.writer(value) if self.writer else value
def __delete__(self, obj):
del obj[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
''' Returns a copy of self. '''
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [_hval(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
self._headers[_hkey(name)] = [_hval(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(_hval(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', _hval(c.OutputString())))
if py3k:
out = [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def local_property(name=None):
if name: depr('local_property() is deprecated and will be removed.') #0.12
ls = threading.local()
def fget(self):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): ls.var = value
def fdel(self): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
''' A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). '''
bind = BaseRequest.__init__
environ = local_property()
class LocalResponse(BaseResponse):
''' A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
'''
bind = BaseResponse.__init__
_status_line = local_property()
_status_code = local_property()
_cookies = local_property()
_headers = local_property()
body = local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, response):
response._status_code = self._status_code
response._status_line = self._status_line
response._headers = self._headers
response._cookies = self._cookies
response.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, route):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization succesful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
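# Hedged auto-JSON sketch (not part of the original source): a route that
# returns a dict is serialized by this plugin and served as application/json.
#
#   @app.route('/info')
#   def info():
#       return {'version': '0.12', 'ok': True}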
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
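# Hedged template-plugin sketch (not part of the original source); the
# template name 'hello' is hypothetical. A `template` entry in the route
# config is equivalent to wrapping the callback in view('hello').
#
#   @app.route('/hello/<name>', template='hello')
#   def hello(name):
#       return dict(name=name)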
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
''' Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
'''
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
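# Hedged MultiDict sketch (not part of the original source):
#
#   md = MultiDict()
#   md['tag'] = 'red'
#   md['tag'] = 'blue'                        # appends, does not overwrite
#   md['tag']                                 # -> 'blue' (newest value)
#   md.getall('tag')                          # -> ['red', 'blue']
#   md.get('missing', default=1, type=int)    # -> 1 (default on failure)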
class FormsDict(MultiDict):
''' This :class:`MultiDict` subclass is used to store request form data.
In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. '''
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
''' Return the value as a unicode string, or the default. '''
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replacing the old value instead of appending to it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [_hval(value)]
def append(self, key, value): self.dict.setdefault(_hkey(key), []).append(_hval(value))
def replace(self, key, value): self.dict[_hkey(key)] = [_hval(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in (_hkey(n) for n in names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
''' A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
This storage is optimized for fast read access. Retrieving a key
or using non-altering dict methods (e.g. `dict.get()`) has no overhead
compared to a native dict.
'''
__slots__ = ('_meta', '_on_change')
class Namespace(DictMixin):
def __init__(self, config, namespace):
self._config = config
self._prefix = namespace
def __getitem__(self, key):
depr('Accessing namespaces as dicts is discouraged. '
'Only use flat item access: '
'cfg["names"]["pace"]["key"] -> cfg["name.space.key"]') #0.12
return self._config[self._prefix + '.' + key]
def __setitem__(self, key, value):
self._config[self._prefix + '.' + key] = value
def __delitem__(self, key):
del self._config[self._prefix + '.' + key]
def __iter__(self):
ns_prefix = self._prefix + '.'
for key in self._config:
ns, dot, name = key.rpartition('.')
if ns == self._prefix and name:
yield name
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._prefix + '.' + key in self._config
def __repr__(self): return '<Config.Namespace %s.*>' % self._prefix
def __str__(self): return '<Config.Namespace %s.*>' % self._prefix
# Deprecated ConfigDict features
def __getattr__(self, key):
depr('Attribute access is deprecated.') #0.12
if key not in self and key[0].isupper():
self[key] = ConfigDict.Namespace(self._config, self._prefix + '.' + key)
if key not in self and key.startswith('__'):
raise AttributeError(key)
return self.get(key)
def __setattr__(self, key, value):
if key in ('_config', '_prefix'):
self.__dict__[key] = value
return
depr('Attribute assignment is deprecated.') #0.12
if hasattr(DictMixin, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], self.__class__):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self:
val = self.pop(key)
if isinstance(val, self.__class__):
prefix = key + '.'
for key in self:
if key.startswith(prefix):
del self[prefix+key]
def __call__(self, *a, **ka):
depr('Calling ConfDict is deprecated. Use the update() method.') #0.12
self.update(*a, **ka)
return self
def __init__(self, *a, **ka):
self._meta = {}
self._on_change = lambda name, value: None
if a or ka:
depr('The constructor no longer accepts parameters.') #0.12
self.update(*a, **ka)
def load_config(self, filename):
''' Load values from an *.ini style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
'''
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
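# Hedged load_config sketch (not part of the original source); the file name
# and its contents are hypothetical.
#
#   # app.ini
#   #   [bottle]
#   #   debug = true
#   #   [sqlite]
#   #   db = /tmp/test.db
#
#   app.config.load_config('app.ini')
#   app.config['debug']       # -> 'true' (values stay strings)
#   app.config['sqlite.db']   # -> '/tmp/test.db'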
def load_dict(self, source, namespace='', make_namespaces=False):
''' Import values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> ConfigDict().load_dict({'name': {'space': {'key': 'value'}}})
{'name.space.key': 'value'}
'''
stack = [(namespace, source)]
while stack:
prefix, source = stack.pop()
if not isinstance(source, dict):
raise TypeError('Source is not a dict (%r)' % type(source))
for key, value in source.items():
if not isinstance(key, basestring):
raise TypeError('Key is not a string (%r)' % type(key))
full_key = prefix + '.' + key if prefix else key
if isinstance(value, dict):
stack.append((full_key, value))
if make_namespaces:
self[full_key] = self.Namespace(self, full_key)
else:
self[full_key] = value
return self
def update(self, *a, **ka):
''' If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` '''
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
def clear(self):
for key in self:
del self[key]
def meta_get(self, key, metafield, default=None):
''' Return the value of a meta field for a key. '''
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
''' Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. '''
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
''' Return an iterable of meta field names defined for a key. '''
return self._meta.get(key, {}).keys()
# Deprecated ConfigDict features
def __getattr__(self, key):
depr('Attribute access is deprecated.') #0.12
if key not in self and key[0].isupper():
self[key] = self.Namespace(self, key)
if key not in self and key.startswith('__'):
raise AttributeError(key)
return self.get(key)
def __setattr__(self, key, value):
if key in self.__slots__:
return dict.__setattr__(self, key, value)
depr('Attribute assignment is deprecated.') #0.12
if hasattr(dict, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], self.Namespace):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self:
val = self.pop(key)
if isinstance(val, self.Namespace):
prefix = key + '.'
for key in self:
if key.startswith(prefix):
del self[prefix+key]
def __call__(self, *a, **ka):
depr('Calling ConfDict is deprecated. Use the update() method.') #0.12
self.update(*a, **ka)
return self
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
''' This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). '''
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
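# Hedged ResourceManager sketch (not part of the original source); the
# directory and file names are hypothetical.
#
#   rm = ResourceManager()
#   rm.add_path('./data/', base=__file__)
#   path = rm.lookup('defaults.json')         # absolute path or None
#   if path:
#       with rm.open('defaults.json') as fp:
#           raw = fp.read()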
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
''' Wrapper for file uploads. '''
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
def get_header(self, name, default=None):
""" Return the value of a header within the mulripart part. """
return self.headers.get(name, default)
@cached_property
def filename(self):
''' Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
'''
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
''' Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
'''
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
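# A minimal usage sketch (not part of the original source; it assumes the
# request object's ``files`` dict defined earlier in this module and a
# hypothetical form field name): store an uploaded file inside a handler.
#
#     upload = request.files.get('datafile')
#     if upload:
#         upload.save('/tmp/uploads', overwrite=False)  # appends upload.filename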
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
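# A minimal usage sketch (not part of the original source; routes and URLs are
# hypothetical): both helpers raise, so neither returns to the caller.
#
#     @route('/old-page')
#     def old_page():
#         redirect('/new-page')        # 303 under HTTP/1.1, otherwise 302
#
#     @route('/members-only')
#     def members_only():
#         abort(403, 'Access denied.')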
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
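# A minimal usage sketch (not part of the original source; route paths and
# directories are hypothetical): serve files below a fixed root directory and
# force a download dialog for one of them.
#
#     @route('/static/<filepath:path>')
#     def serve_static(filepath):
#         return static_file(filepath, root='/var/www/static')
#
#     @route('/report')
#     def report():
#         return static_file('report.pdf', root='/var/www/files',
#                            download='report.pdf')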
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
''' Yield (start, end) ranges parsed from an HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
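# Illustrative examples (not part of the original source): with a 1000 byte
# resource, the returned ranges are half-open (end index non-inclusive) and
# unsatisfiable ranges are skipped.
#
#     list(parse_range_header('bytes=0-99,500-', 1000))  # [(0, 100), (500, 1000)]
#     list(parse_range_header('bytes=2000-', 1000))      # []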
def _parse_qsl(qs):
r = []
for pair in qs.split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
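# Illustrative round trip (not part of the original source; the keys are
# hypothetical): cookie_decode() returns None when the signature check fails.
#
#     token = cookie_encode({'user': 'alice'}, key='s3cr3t')
#     cookie_is_encoded(token)               # True
#     cookie_decode(token, key='s3cr3t')     # {'user': 'alice'}
#     cookie_decode(token, key='wrong-key')  # None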
def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
.replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
''' Escape and quote a string to be used as an HTTP attribute.'''
return '"%s"' % html_escape(string).replace('\n',' ')\
.replace('\r',' ').replace('\t','	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
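# Illustrative examples (not part of the original source): shift one fragment
# from PATH_INFO to SCRIPT_NAME, then shift it back with a negative count.
#
#     path_shift('/app', '/users/42')            # ('/app/users', '/42')
#     path_shift('/app/users', '/42', shift=-1)  # ('/app', '/users/42')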
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
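# A minimal usage sketch (not part of the original source; the credential
# check and route are hypothetical): protect a handler with HTTP basic auth.
#
#     def check_credentials(user, password):
#         return user == 'admin' and password == 's3cr3t'
#
#     @route('/admin')
#     @auth_basic(check_credentials, realm='admin area')
#     def admin_index():
#         return 'restricted content'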
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
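# A minimal usage sketch (not part of the original source; the route below is
# hypothetical): the shortcuts above act on the current default application,
# so a plain @route works without instantiating Bottle explicitly.
#
#     @route('/hello/<name>')
#     def hello(name):
#         return 'Hello %s!' % name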
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from wsgiref.simple_server import make_server
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
srv = make_server(self.host, self.port, app, server_cls, handler_cls)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
depr(0, 13, "The wsgi server part of cherrypy was split into a new "
"project called 'cheroot'.", "Use the 'cheroot' server "
"adapter instead of cherrypy.")
from cherrypy import wsgiserver # This will fail for CherryPy >= 9
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class CherootServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cheroot import wsgi
from cheroot.ssl import builtin
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.pop('certfile', None)
keyfile = self.options.pop('keyfile', None)
chainfile = self.options.pop('chainfile', None)
server = wsgi.Server(**self.options)
if certfile and keyfile:
server.ssl_adapter = builtin.BuiltinSSLAdapter(
certfile, keyfile, chainfile)
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.options.pop('fast', None):
depr('The "fast" option has been deprecated and removed by Gevent.')
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self,handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
try:
wsgi.server(listen((self.host, self.port)), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have an old version of eventlet
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
CherootServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'cheroot': CherootServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO':GeventSocketIOServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
lockfile = None
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
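# A minimal usage sketch (not part of the original source; host and port are
# example values): start the default application with the auto-reloader.
#
#     if __name__ == '__main__':
#         run(host='localhost', port=8080, reloader=True, debug=True)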
class FileCheckerThread(threading.Thread):
''' Interrupt the main thread as soon as a changed module file is detected,
the lockfile is deleted, or the lockfile gets too old. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '') or ''
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.') #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.') #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
if _name is None:
depr('Rebase function called without arguments.'
' You were probably looking for {{base}}?', True) #0.12
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
if _name is None:
depr('Include function called without arguments.', True) #0.12
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError): pass
class StplParser(object):
''' Parser for stpl templates. '''
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 9 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '([urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Open and close grouping tokens
_re_tok += '|([\\[\\{\\(])'
_re_tok += '|([\\]\\}\\)])'
# 5,6: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 7: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 8: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=\\r?$))'
# 9: And finally, a single newline. The 10th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))(%%?)'
# Match inline statements (may contain python strings)
_re_inl = '(?m)%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
_re_tok = '(?m)' + _re_tok
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
''' Tokens as a space separated string (default: <% %> % {{ }}) '''
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
self.offset += m.end()
if m.group(1): # New escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+m.group(5)+line+sep)
self.offset += len(line+sep)+1
continue
elif m.group(5): # Old escape syntax
depr('Escape code lines with a backslash.') #0.12
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+line+sep)
self.offset += len(line+sep)+1
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if (code_line or self.paren_depth > 0) and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
def process_inline(self, chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
line, comment = self.fix_backward_compatibility(line, comment)
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def fix_backward_compatibility(self, line, comment):
parts = line.strip().split(None, 2)
if parts and parts[0] in ('include', 'rebase'):
depr('The include and rebase keywords are functions now.') #0.12
if len(parts) == 1: return "_printlist([base])", comment
elif len(parts) == 2: return "_=%s(%r)" % tuple(parts), comment
else: return "_=%s(%r, %s)" % tuple(parts), comment
if self.lineno <= 2 and not line.strip() and 'coding' in comment:
m = re.match(r"#.*coding[:=]\s*([-\w.]+)", comment)
if m:
depr('PEP263 encoding strings in templates are deprecated.') #0.12
enc = m.group(1)
self.source = self.source.encode(self.encoding).decode(enc)
self.encoding = enc
return line, comment.replace('coding','coding*')
return line, comment
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
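# Illustrative examples (not part of the original source; the template name
# and variables are hypothetical): template() accepts either an inline
# template string or a name resolved against TEMPLATE_PATH.
#
#     template('Hello {{name}}!', name='World')  # -> 'Hello World!'
#     template('user_profile', user=user)        # e.g. ./views/user_profile.tpl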
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
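# A minimal usage sketch (not part of the original source; template and route
# names are hypothetical): the handler returns a dict of template variables
# and the decorator renders the named template with them.
#
#     @route('/profile/<name>')
#     @view('profile_page')
#     def profile(name):
#         return dict(name=name)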
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[422] = "Unprocessable Entity" # RFC 4918
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
|
PYSEC-2022-227
|
django/contrib/gis/db/models/aggregates.py
|
@@ -1,7 +1,7 @@
from django.contrib.gis.db.models.fields import (
ExtentField, GeometryCollectionField, GeometryField, LineStringField,
)
-from django.db.models import Aggregate
+from django.db.models import Aggregate, Value
from django.utils.functional import cached_property
__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union']
@@ -27,9 +27,16 @@ def as_sql(self, compiler, connection, function=None, **extra_context):
)
def as_oracle(self, compiler, connection, **extra_context):
- tolerance = self.extra.get('tolerance') or getattr(self, 'tolerance', 0.05)
- template = None if self.is_extent else '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))'
- return self.as_sql(compiler, connection, template=template, tolerance=tolerance, **extra_context)
+ if not self.is_extent:
+ tolerance = self.extra.get('tolerance') or getattr(self, 'tolerance', 0.05)
+ clone = self.copy()
+ clone.set_source_expressions([
+ *self.get_source_expressions(),
+ Value(tolerance),
+ ])
+ template = '%(function)s(SDOAGGRTYPE(%(expressions)s))'
+ return clone.as_sql(compiler, connection, template=template, **extra_context)
+ return self.as_sql(compiler, connection, **extra_context)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
|
from django.contrib.gis.db.models.fields import (
ExtentField, GeometryCollectionField, GeometryField, LineStringField,
)
from django.db.models import Aggregate
from django.utils.functional import cached_property
__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union']
class GeoAggregate(Aggregate):
function = None
is_extent = False
@cached_property
def output_field(self):
return self.output_field_class(self.source_expressions[0].output_field.srid)
def as_sql(self, compiler, connection, function=None, **extra_context):
# this will be called again in parent, but it's needed now - before
# we get the spatial_aggregate_name
connection.ops.check_expression_support(self)
return super().as_sql(
compiler,
connection,
function=function or connection.ops.spatial_aggregate_name(self.name),
**extra_context
)
def as_oracle(self, compiler, connection, **extra_context):
tolerance = self.extra.get('tolerance') or getattr(self, 'tolerance', 0.05)
template = None if self.is_extent else '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))'
return self.as_sql(compiler, connection, template=template, tolerance=tolerance, **extra_context)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
for expr in c.get_source_expressions():
if not hasattr(expr.field, 'geom_type'):
raise ValueError('Geospatial aggregates only allowed on geometry fields.')
return c
class Collect(GeoAggregate):
name = 'Collect'
output_field_class = GeometryCollectionField
class Extent(GeoAggregate):
name = 'Extent'
is_extent = '2D'
def __init__(self, expression, **extra):
super().__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection):
return connection.ops.convert_extent(value)
class Extent3D(GeoAggregate):
name = 'Extent3D'
is_extent = '3D'
def __init__(self, expression, **extra):
super().__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection):
return connection.ops.convert_extent3d(value)
class MakeLine(GeoAggregate):
name = 'MakeLine'
output_field_class = LineStringField
class Union(GeoAggregate):
name = 'Union'
output_field_class = GeometryField
|
GHSA-3gh2-xw74-jmcw
|
django/contrib/gis/db/models/functions.py
|
@@ -111,12 +111,14 @@ class OracleToleranceMixin:
tolerance = 0.05
def as_oracle(self, compiler, connection, **extra_context):
- tol = self.extra.get('tolerance', self.tolerance)
- return self.as_sql(
- compiler, connection,
- template="%%(function)s(%%(expressions)s, %s)" % tol,
- **extra_context
- )
+ tolerance = Value(self._handle_param(
+ self.extra.get('tolerance', self.tolerance),
+ 'tolerance',
+ NUMERIC_TYPES,
+ ))
+ clone = self.copy()
+ clone.set_source_expressions([*self.get_source_expressions(), tolerance])
+ return clone.as_sql(compiler, connection, **extra_context)
class Area(OracleToleranceMixin, GeoFunc):
|
from decimal import Decimal
from django.contrib.gis.db.models.fields import BaseSpatialField, GeometryField
from django.contrib.gis.db.models.sql import AreaField, DistanceField
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import FieldError
from django.db import NotSupportedError
from django.db.models import (
BinaryField, BooleanField, FloatField, Func, IntegerField, TextField,
Transform, Value,
)
from django.db.models.functions import Cast
from django.utils.functional import cached_property
NUMERIC_TYPES = (int, float, Decimal)
class GeoFuncMixin:
function = None
geom_param_pos = (0,)
def __init__(self, *expressions, **extra):
super().__init__(*expressions, **extra)
# Ensure that value expressions are geometric.
for pos in self.geom_param_pos:
expr = self.source_expressions[pos]
if not isinstance(expr, Value):
continue
try:
output_field = expr.output_field
except FieldError:
output_field = None
geom = expr.value
if not isinstance(geom, GEOSGeometry) or output_field and not isinstance(output_field, GeometryField):
raise TypeError("%s function requires a geometric argument in position %d." % (self.name, pos + 1))
if not geom.srid and not output_field:
raise ValueError("SRID is required for all geometries.")
if not output_field:
self.source_expressions[pos] = Value(geom, output_field=GeometryField(srid=geom.srid))
@property
def name(self):
return self.__class__.__name__
@cached_property
def geo_field(self):
return self.source_expressions[self.geom_param_pos[0]].field
def as_sql(self, compiler, connection, function=None, **extra_context):
if self.function is None and function is None:
function = connection.ops.spatial_function_name(self.name)
return super().as_sql(compiler, connection, function=function, **extra_context)
def resolve_expression(self, *args, **kwargs):
res = super().resolve_expression(*args, **kwargs)
# Ensure that expressions are geometric.
source_fields = res.get_source_fields()
for pos in self.geom_param_pos:
field = source_fields[pos]
if not isinstance(field, GeometryField):
raise TypeError(
"%s function requires a GeometryField in position %s, got %s." % (
self.name, pos + 1, type(field).__name__,
)
)
base_srid = res.geo_field.srid
for pos in self.geom_param_pos[1:]:
expr = res.source_expressions[pos]
expr_srid = expr.output_field.srid
if expr_srid != base_srid:
# Automatic SRID conversion so objects are comparable.
res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
return res
def _handle_param(self, value, param_name='', check_types=None):
if not hasattr(value, 'resolve_expression'):
if check_types and not isinstance(value, check_types):
raise TypeError(
"The %s parameter has the wrong type: should be %s." % (
param_name, check_types)
)
return value
class GeoFunc(GeoFuncMixin, Func):
pass
class GeomOutputGeoFunc(GeoFunc):
@cached_property
def output_field(self):
return GeometryField(srid=self.geo_field.srid)
class SQLiteDecimalToFloatMixin:
"""
By default, Decimal values are converted to str by the SQLite backend, which
is not acceptable by the GIS functions expecting numeric values.
"""
def as_sqlite(self, compiler, connection, **extra_context):
for expr in self.get_source_expressions():
if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
expr.value = float(expr.value)
return super().as_sql(compiler, connection, **extra_context)
class OracleToleranceMixin:
tolerance = 0.05
def as_oracle(self, compiler, connection, **extra_context):
tol = self.extra.get('tolerance', self.tolerance)
return self.as_sql(
compiler, connection,
template="%%(function)s(%%(expressions)s, %s)" % tol,
**extra_context
)
class Area(OracleToleranceMixin, GeoFunc):
arity = 1
@cached_property
def output_field(self):
return AreaField(self.geo_field)
def as_sql(self, compiler, connection, **extra_context):
if not connection.features.supports_area_geodetic and self.geo_field.geodetic(connection):
raise NotSupportedError('Area on geodetic coordinate systems not supported.')
return super().as_sql(compiler, connection, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
extra_context['template'] = '%(function)s(%(expressions)s, %(spheroid)d)'
extra_context['spheroid'] = True
return self.as_sql(compiler, connection, **extra_context)
class Azimuth(GeoFunc):
output_field = FloatField()
arity = 2
geom_param_pos = (0, 1)
class AsGeoJSON(GeoFunc):
output_field = TextField()
def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
if options:
expressions.append(options)
super().__init__(*expressions, **extra)
def as_oracle(self, compiler, connection, **extra_context):
source_expressions = self.get_source_expressions()
clone = self.copy()
clone.set_source_expressions(source_expressions[:1])
return super(AsGeoJSON, clone).as_sql(compiler, connection, **extra_context)
class AsGML(GeoFunc):
geom_param_pos = (1,)
output_field = TextField()
def __init__(self, expression, version=2, precision=8, **extra):
expressions = [version, expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
super().__init__(*expressions, **extra)
def as_oracle(self, compiler, connection, **extra_context):
source_expressions = self.get_source_expressions()
version = source_expressions[0]
clone = self.copy()
clone.set_source_expressions([source_expressions[1]])
extra_context['function'] = 'SDO_UTIL.TO_GML311GEOMETRY' if version.value == 3 else 'SDO_UTIL.TO_GMLGEOMETRY'
return super(AsGML, clone).as_sql(compiler, connection, **extra_context)
class AsKML(AsGML):
def as_sqlite(self, compiler, connection, **extra_context):
# No version parameter
clone = self.copy()
clone.set_source_expressions(self.get_source_expressions()[1:])
return clone.as_sql(compiler, connection, **extra_context)
class AsSVG(GeoFunc):
output_field = TextField()
def __init__(self, expression, relative=False, precision=8, **extra):
relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
expressions = [
expression,
relative,
self._handle_param(precision, 'precision', int),
]
super().__init__(*expressions, **extra)
class AsWKB(GeoFunc):
output_field = BinaryField()
arity = 1
class AsWKT(GeoFunc):
output_field = TextField()
arity = 1
class BoundingCircle(OracleToleranceMixin, GeoFunc):
def __init__(self, expression, num_seg=48, **extra):
super().__init__(expression, num_seg, **extra)
def as_oracle(self, compiler, connection, **extra_context):
clone = self.copy()
clone.set_source_expressions([self.get_source_expressions()[0]])
return super(BoundingCircle, clone).as_oracle(compiler, connection, **extra_context)
class Centroid(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 1
class Difference(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
class DistanceResultMixin:
@cached_property
def output_field(self):
return DistanceField(self.geo_field)
def source_is_geography(self):
return self.geo_field.geography and self.geo_field.srid == 4326
class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
geom_param_pos = (0, 1)
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = self._handle_param(spheroid, 'spheroid', bool)
super().__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection, **extra_context):
clone = self.copy()
function = None
expr2 = clone.source_expressions[1]
geography = self.source_is_geography()
if expr2.output_field.geography != geography:
if isinstance(expr2, Value):
expr2.output_field.geography = geography
else:
clone.source_expressions[1] = Cast(
expr2,
GeometryField(srid=expr2.output_field.srid, geography=geography),
)
if not geography and self.geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special distance functions
if self.spheroid:
# DistanceSpheroid is more accurate and resource intensive than DistanceSphere
function = connection.ops.spatial_function_name('DistanceSpheroid')
# Replace boolean param by the real spheroid of the base field
clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
else:
function = connection.ops.spatial_function_name('DistanceSphere')
return super(Distance, clone).as_sql(compiler, connection, function=function, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
# SpatiaLite returns NULL instead of zero on geodetic coordinates
extra_context['template'] = 'COALESCE(%(function)s(%(expressions)s, %(spheroid)s), 0)'
extra_context['spheroid'] = int(bool(self.spheroid))
return super().as_sql(compiler, connection, **extra_context)
class Envelope(GeomOutputGeoFunc):
arity = 1
class ForcePolygonCW(GeomOutputGeoFunc):
arity = 1
class GeoHash(GeoFunc):
output_field = TextField()
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', int))
super().__init__(*expressions, **extra)
def as_mysql(self, compiler, connection, **extra_context):
clone = self.copy()
# If no precision is provided, set it to the maximum.
if len(clone.source_expressions) < 2:
clone.source_expressions.append(Value(100))
return clone.as_sql(compiler, connection, **extra_context)
class GeometryDistance(GeoFunc):
output_field = FloatField()
arity = 2
function = ''
arg_joiner = ' <-> '
geom_param_pos = (0, 1)
class Intersection(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
@BaseSpatialField.register_lookup
class IsValid(OracleToleranceMixin, GeoFuncMixin, Transform):
lookup_name = 'isvalid'
output_field = BooleanField()
def as_oracle(self, compiler, connection, **extra_context):
sql, params = super().as_oracle(compiler, connection, **extra_context)
return "CASE %s WHEN 'TRUE' THEN 1 ELSE 0 END" % sql, params
class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super().__init__(expr1, **extra)
def as_sql(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
raise NotSupportedError("This backend doesn't support Length on geodetic fields")
return super().as_sql(compiler, connection, **extra_context)
def as_postgresql(self, compiler, connection, **extra_context):
clone = self.copy()
function = None
if self.source_is_geography():
clone.source_expressions.append(Value(self.spheroid))
elif self.geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
function = connection.ops.spatial_function_name('LengthSpheroid')
clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
function = connection.ops.length3d
return super(Length, clone).as_sql(compiler, connection, function=function, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
function = None
if self.geo_field.geodetic(connection):
function = 'GeodesicLength' if self.spheroid else 'GreatCircleLength'
return super().as_sql(compiler, connection, function=function, **extra_context)
class LineLocatePoint(GeoFunc):
output_field = FloatField()
arity = 2
geom_param_pos = (0, 1)
class MakeValid(GeomOutputGeoFunc):
pass
class MemSize(GeoFunc):
output_field = IntegerField()
arity = 1
class NumGeometries(GeoFunc):
output_field = IntegerField()
arity = 1
class NumPoints(GeoFunc):
output_field = IntegerField()
arity = 1
class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
arity = 1
def as_postgresql(self, compiler, connection, **extra_context):
function = None
if self.geo_field.geodetic(connection) and not self.source_is_geography():
raise NotSupportedError("ST_Perimeter cannot use a non-projected non-geography field.")
dim = min(f.dim for f in self.get_source_fields())
if dim > 2:
function = connection.ops.perimeter3d
return super().as_sql(compiler, connection, function=function, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
raise NotSupportedError("Perimeter cannot use a non-projected field.")
return super().as_sql(compiler, connection, **extra_context)
class PointOnSurface(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 1
class Reverse(GeoFunc):
arity = 1
class Scale(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, 'x', NUMERIC_TYPES),
self._handle_param(y, 'y', NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
super().__init__(*expressions, **extra)
class SnapToGrid(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions += [
*(self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]),
*(self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]),
]
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
super().__init__(*expressions, **extra)
class SymDifference(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
class Transform(GeomOutputGeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, 'srid', int),
]
if 'output_field' not in extra:
extra['output_field'] = GeometryField(srid=srid)
super().__init__(*expressions, **extra)
class Translate(Scale):
def as_sqlite(self, compiler, connection, **extra_context):
clone = self.copy()
if len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate
clone.source_expressions.append(Value(0))
return super(Translate, clone).as_sqlite(compiler, connection, **extra_context)
class Union(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
|
GHSA-3gh2-xw74-jmcw
|
tests/gis_tests/distapp/tests.py
|
@@ -434,6 +434,37 @@ def test_distance_function_d_lookup(self):
).filter(d=D(m=1))
self.assertTrue(qs.exists())
+ @unittest.skipUnless(
+ connection.vendor == 'oracle',
+ 'Oracle supports tolerance parameter.',
+ )
+ def test_distance_function_tolerance_escaping(self):
+ qs = Interstate.objects.annotate(
+ d=Distance(
+ Point(500, 500, srid=3857),
+ Point(0, 0, srid=3857),
+ tolerance='0.05) = 1 OR 1=1 OR (1+1',
+ ),
+ ).filter(d=D(m=1)).values('pk')
+ msg = 'The tolerance parameter has the wrong type'
+ with self.assertRaisesMessage(TypeError, msg):
+ qs.exists()
+
+ @unittest.skipUnless(
+ connection.vendor == 'oracle',
+ 'Oracle supports tolerance parameter.',
+ )
+ def test_distance_function_tolerance(self):
+ # Tolerance is greater than distance.
+ qs = Interstate.objects.annotate(
+ d=Distance(
+ Point(0, 0, srid=3857),
+ Point(1, 1, srid=3857),
+ tolerance=1.5,
+ ),
+ ).filter(d=0).values('pk')
+ self.assertIs(qs.exists(), True)
+
@skipIfDBFeature("supports_distance_geodetic")
@skipUnlessDBFeature("has_Distance_function")
def test_distance_function_raw_result_d_lookup(self):
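The escaping test added above expects a TypeError whose message ("The tolerance parameter has the wrong type") matches the check in GeoFuncMixin._handle_param shown earlier in this record. A minimal sketch of that kind of type guard, assuming a NUMERIC_TYPES tuple like the one the GIS functions use (this illustrates the validation idea only and is not necessarily the exact upstream change):

from decimal import Decimal

NUMERIC_TYPES = (int, float, Decimal)  # assumption: mirrors the tuple used in functions.py

def validate_tolerance(value, param_name='tolerance', check_types=NUMERIC_TYPES):
    # Reject anything that is not a numeric literal (or a resolvable expression)
    # before it can be interpolated into the Oracle SDO_GEOM call.
    if not hasattr(value, 'resolve_expression'):
        if check_types and not isinstance(value, check_types):
            raise TypeError(
                "The %s parameter has the wrong type: should be %s." % (
                    param_name, check_types)
            )
    return value

# validate_tolerance(1.5)                           -> 1.5
# validate_tolerance("0.05) = 1 OR 1=1 OR (1+1")    -> TypeError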
|
import unittest
from django.contrib.gis.db.models.functions import (
Area, Distance, Length, Perimeter, Transform, Union,
)
from django.contrib.gis.geos import GEOSGeometry, LineString, Point
from django.contrib.gis.measure import D # alias for Distance
from django.db import NotSupportedError, connection
from django.db.models import Exists, F, OuterRef, Q
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from ..utils import (
FuncTestMixin, mysql, no_oracle, oracle, postgis, spatialite,
)
from .models import (
AustraliaCity, CensusZipcode, Interstate, SouthTexasCity, SouthTexasCityFt,
SouthTexasInterstate, SouthTexasZipcode,
)
class DistanceTest(TestCase):
fixtures = ['initial']
def setUp(self):
# A point we are testing distances with -- using a WGS84
        # coordinate that'll be implicitly transformed to
        # the coordinate system of the field, EPSG:32140 (Texas South Central
# w/units in meters)
self.stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
self.au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
def test_init(self):
"""
Test initialization of distance models.
"""
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@skipUnlessDBFeature("supports_dwithin_lookup")
def test_dwithin(self):
"""
Test the `dwithin` lookup type.
"""
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
# approximate).
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
# Expected cities for Australia and Texas.
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
        # Performing distance queries on two projected coordinate systems, one
# with units in meters and the other in units of U.S. survey feet.
for dist in tx_dists:
if isinstance(dist, tuple):
dist1, dist2 = dist
else:
dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
with self.subTest(dist=dist, qs=qs):
self.assertEqual(tx_cities, self.get_names(qs))
# With a complex geometry expression
self.assertFalse(SouthTexasCity.objects.exclude(point__dwithin=(Union('point', 'point'), 0)))
# Now performing the `dwithin` queries on a geodetic coordinate system.
for dist in au_dists:
with self.subTest(dist=dist):
type_error = isinstance(dist, D) and not oracle
if isinstance(dist, tuple):
if oracle or spatialite:
# Result in meters
dist = dist[1]
else:
# Result in units of the field
dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A ValueError should be raised on PostGIS when trying to
# pass Distance objects into a DWithin query using a
# geodetic field.
with self.assertRaises(ValueError):
AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count()
else:
self.assertEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
@skipUnlessDBFeature("supports_distances_lookups")
def test_distance_lookups(self):
"""
Test the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types.
"""
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
        # (thus, Houston and Southside Place will be excluded as tested in
        # `test_dwithin` above).
for model in [SouthTexasCity, SouthTexasCityFt]:
stx_pnt = self.stx_pnt.transform(model._meta.get_field('point').srid, clone=True)
qs = model.objects.filter(point__distance_gte=(stx_pnt, D(km=7))).filter(
point__distance_lte=(stx_pnt, D(km=20)),
)
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
# Doing a distance query using Polygons instead of a Point.
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
# If we add a little more distance 77002 should be included.
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test_geodetic_distance_lookups(self):
"""
Test distance lookups on geodetic coordinate systems.
"""
# Line is from Canberra to Sydney. Query is for all other cities within
        # 100km of that line (which should exclude only Hobart & Adelaide).
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
expected_cities = [
'Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong',
]
if spatialite:
# SpatiaLite is less accurate and returns 102.8km for Batemans Bay.
expected_cities.pop(0)
self.assertEqual(expected_cities, self.get_names(dist_qs))
msg = "2, 3, or 4-element tuple required for 'distance_lte' lookup."
with self.assertRaisesMessage(ValueError, msg): # Too many params.
len(AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4', None)))
with self.assertRaisesMessage(ValueError, msg): # Too few params.
len(AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
msg = "For 4-element tuples the last argument must be the 'spheroid' directive."
with self.assertRaisesMessage(ValueError, msg):
len(AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
        # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
querysets = [qs1]
if connection.features.has_DistanceSpheroid_function:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets.append(qs2)
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
@skipUnlessDBFeature("supports_distances_lookups")
def test_distance_lookups_with_expression_rhs(self):
stx_pnt = self.stx_pnt.transform(SouthTexasCity._meta.get_field('point').srid, clone=True)
qs = SouthTexasCity.objects.filter(
point__distance_lte=(stx_pnt, F('radius')),
).order_by('name')
self.assertEqual(
self.get_names(qs),
['Bellaire', 'Downtown Houston', 'Southside Place', 'West University Place']
)
# With a combined expression
qs = SouthTexasCity.objects.filter(
point__distance_lte=(stx_pnt, F('radius') * 2),
).order_by('name')
self.assertEqual(len(qs), 5)
self.assertIn('Pearland', self.get_names(qs))
# With spheroid param
if connection.features.supports_distance_geodetic:
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.filter(
point__distance_lte=(hobart.point, F('radius') * 70, 'spheroid'),
).order_by('name')
self.assertEqual(self.get_names(qs), ['Canberra', 'Hobart', 'Melbourne'])
# With a complex geometry expression
self.assertFalse(SouthTexasCity.objects.filter(point__distance_gt=(Union('point', 'point'), 0)))
self.assertEqual(
SouthTexasCity.objects.filter(point__distance_lte=(Union('point', 'point'), 0)).count(),
SouthTexasCity.objects.count(),
)
@unittest.skipUnless(mysql, 'This is a MySQL-specific test')
def test_mysql_geodetic_distance_error(self):
msg = 'Only numeric values of degree units are allowed on geodetic distance queries.'
with self.assertRaisesMessage(ValueError, msg):
AustraliaCity.objects.filter(point__distance_lte=(Point(0, 0), D(m=100))).exists()
@skipUnlessDBFeature('supports_dwithin_lookup')
def test_dwithin_subquery(self):
"""dwithin lookup in a subquery using OuterRef as a parameter."""
qs = CensusZipcode.objects.annotate(
annotated_value=Exists(SouthTexasCity.objects.filter(
point__dwithin=(OuterRef('poly'), D(m=10)),
))
).filter(annotated_value=True)
self.assertEqual(self.get_names(qs), ['77002', '77025', '77401'])
@skipUnlessDBFeature('supports_dwithin_lookup', 'supports_dwithin_distance_expr')
def test_dwithin_with_expression_rhs(self):
# LineString of Wollongong and Adelaide coords.
ls = LineString(((150.902, -34.4245), (138.6, -34.9258)), srid=4326)
qs = AustraliaCity.objects.filter(
point__dwithin=(ls, F('allowed_distance')),
).order_by('name')
self.assertEqual(
self.get_names(qs),
['Adelaide', 'Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong'],
)
@skipIfDBFeature('supports_dwithin_distance_expr')
def test_dwithin_with_expression_rhs_not_supported(self):
ls = LineString(((150.902, -34.4245), (138.6, -34.9258)), srid=4326)
msg = (
'This backend does not support expressions for specifying '
'distance in the dwithin lookup.'
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(AustraliaCity.objects.filter(
point__dwithin=(ls, F('allowed_distance')),
))
'''
=============================
Distance functions on PostGIS
=============================
| Projected Geometry | Lon/lat Geometry | Geography (4326)
ST_Distance(geom1, geom2) | OK (meters) | :-( (degrees) | OK (meters)
ST_Distance(geom1, geom2, use_spheroid=False) | N/A | N/A | OK (meters), less accurate, quick
Distance_Sphere(geom1, geom2) | N/A | OK (meters) | N/A
Distance_Spheroid(geom1, geom2, spheroid) | N/A | OK (meters) | N/A
ST_Perimeter(geom1) | OK | :-( (degrees) | OK
================================
Distance functions on SpatiaLite
================================
| Projected Geometry | Lon/lat Geometry
ST_Distance(geom1, geom2) | OK (meters) | N/A
ST_Distance(geom1, geom2, use_ellipsoid=True) | N/A | OK (meters)
ST_Distance(geom1, geom2, use_ellipsoid=False) | N/A | OK (meters), less accurate, quick
Perimeter(geom1) | OK | :-( (degrees)
''' # NOQA
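# Rough mapping from the table above to the ORM on a geodetic (lon/lat)
# PostGIS field (illustrative sketch using the fixtures below):
#     Distance('point', other)                 -> ST_DistanceSphere(...)
#     Distance('point', other, spheroid=True)  -> ST_DistanceSpheroid(..., <spheroid>)
# For example:
#     hobart = AustraliaCity.objects.get(name='Hobart')
#     qs = AustraliaCity.objects.annotate(
#         d_sphere=Distance('point', hobart.point),
#         d_spheroid=Distance('point', hobart.point, spheroid=True),
#     )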
class DistanceFunctionsTests(FuncTestMixin, TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("has_Area_function")
def test_area(self):
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.annotate(area=Area('poly')).order_by('name')):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
@skipUnlessDBFeature("has_Distance_function")
def test_distance_simple(self):
"""
Test a simple distance query, with projected coordinates and without
transformation.
"""
lagrange = GEOSGeometry('POINT(805066.295722839 4231496.29461335)', 32140)
houston = SouthTexasCity.objects.annotate(dist=Distance('point', lagrange)).order_by('id').first()
tol = 2 if oracle else 5
self.assertAlmostEqual(
houston.dist.m,
147075.069813,
tol
)
@skipUnlessDBFeature("has_Distance_function", "has_Transform_function")
def test_distance_projected(self):
"""
Test the `Distance` function on projected coordinate systems.
"""
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140))
# FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278))
# FROM distapp_southtexascityft;
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.annotate(distance=Distance('point', lagrange)).order_by('id')
dist2 = SouthTexasCityFt.objects.annotate(distance=Distance('point', lagrange)).order_by('id')
dist_qs = [dist1, dist2]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
tol = 2 if oracle else 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
with self.subTest(c=c):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_geodetic(self):
"""
Test the `Distance` function on geodetic coordinate systems.
"""
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString(((150.902, -34.4245), (150.87, -34.5789)), srid=4326)
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326))
# FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.annotate(distance=Distance('point', ls)).order_by('name')
for city, distance in zip(qs, distances):
with self.subTest(city=city, distance=distance):
# Testing equivalence to within a meter (kilometer on SpatiaLite).
tol = -3 if spatialite else 0
self.assertAlmostEqual(distance, city.distance.m, tol)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_geodetic_spheroid(self):
tol = 2 if oracle else 4
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326),
# 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326))
# FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
spheroid_distances = [
60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034,
]
sphere_distances = [
60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134,
]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).annotate(
distance=Distance('point', hillsdale.point, spheroid=True)
).order_by('id')
for i, c in enumerate(qs):
with self.subTest(c=c):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis or spatialite:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).annotate(
distance=Distance('point', hillsdale.point)
).order_by('id')
for i, c in enumerate(qs):
with self.subTest(c=c):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@skipIfDBFeature("supports_distance_geodetic")
@skipUnlessDBFeature("has_Distance_function")
def test_distance_function_raw_result(self):
distance = Interstate.objects.annotate(
d=Distance(Point(0, 0, srid=4326), Point(0, 1, srid=4326)),
).first().d
self.assertEqual(distance, 1)
@skipUnlessDBFeature("has_Distance_function")
def test_distance_function_d_lookup(self):
qs = Interstate.objects.annotate(
d=Distance(Point(0, 0, srid=3857), Point(0, 1, srid=3857)),
).filter(d=D(m=1))
self.assertTrue(qs.exists())
@skipIfDBFeature("supports_distance_geodetic")
@skipUnlessDBFeature("has_Distance_function")
def test_distance_function_raw_result_d_lookup(self):
qs = Interstate.objects.annotate(
d=Distance(Point(0, 0, srid=4326), Point(0, 1, srid=4326)),
).filter(d=D(m=1))
msg = 'Distance measure is supplied, but units are unknown for result.'
with self.assertRaisesMessage(ValueError, msg):
list(qs)
@no_oracle # Oracle already handles geographic distance calculation.
@skipUnlessDBFeature("has_Distance_function", 'has_Transform_function')
def test_distance_transform(self):
"""
Test the `Distance` function used with `Transform` on a geographic field.
"""
# We'll be using a Polygon (created by buffering the centroid
        # of 77005 to 100m) -- which isn't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140),
# ST_GeomFromText('<buffer_wkt>', 32140))
# FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').annotate(
distance=Distance(Transform('poly', 32140), buf)
).order_by('name')
self.assertEqual(ref_zips, sorted(c.name for c in qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
@skipUnlessDBFeature("has_Distance_function")
def test_distance_order_by(self):
qs = SouthTexasCity.objects.annotate(distance=Distance('point', Point(3, 3, srid=32140))).order_by(
'distance'
).values_list('name', flat=True).filter(name__in=('San Antonio', 'Pearland'))
self.assertSequenceEqual(qs, ['San Antonio', 'Pearland'])
@skipUnlessDBFeature("has_Length_function")
def test_length(self):
"""
Test the `Length` function.
"""
# Reference query (should use `length_spheroid`).
        # SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326), 'SPHEROID["WGS 84",6378137,298.257223563,
# AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if connection.features.supports_length_geodetic:
qs = Interstate.objects.annotate(length=Length('path'))
tol = 2 if oracle else 3
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# TODO: test with spheroid argument (True and False)
else:
# Does not support geodetic coordinate systems.
with self.assertRaises(NotSupportedError):
list(Interstate.objects.annotate(length=Length('path')))
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.annotate(length=Length('path')).get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
self.assertTrue(
SouthTexasInterstate.objects.annotate(length=Length('path')).filter(length__gt=4000).exists()
)
# Length with an explicit geometry value.
qs = Interstate.objects.annotate(length=Length(i10.path))
self.assertAlmostEqual(qs.first().length.m, len_m2, 2)
@skipUnlessDBFeature("has_Perimeter_function")
def test_perimeter(self):
"""
Test the `Perimeter` function.
"""
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
tol = 2 if oracle else 7
qs = SouthTexasZipcode.objects.annotate(perimeter=Perimeter('poly')).order_by('name')
for i, z in enumerate(qs):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
qs = SouthTexasCity.objects.annotate(perim=Perimeter('point'))
for city in qs:
self.assertEqual(0, city.perim.m)
@skipUnlessDBFeature("has_Perimeter_function")
def test_perimeter_geodetic(self):
# Currently only Oracle supports calculating the perimeter on geodetic
# geometries (without being transformed).
qs1 = CensusZipcode.objects.annotate(perim=Perimeter('poly'))
if connection.features.supports_perimeter_geodetic:
self.assertAlmostEqual(qs1[0].perim.m, 18406.3818954314, 3)
else:
with self.assertRaises(NotSupportedError):
list(qs1)
# But should work fine when transformed to projected coordinates
qs2 = CensusZipcode.objects.annotate(perim=Perimeter(Transform('poly', 32140))).filter(name='77002')
self.assertAlmostEqual(qs2[0].perim.m, 18404.355, 3)
@skipUnlessDBFeature("supports_null_geometries", "has_Area_function", "has_Distance_function")
def test_measurement_null_fields(self):
"""
Test the measurement functions on fields with NULL values.
"""
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.annotate(
distance=Distance('poly', htown.point), area=Area('poly')
).get(name='78212')
self.assertIsNone(z.distance)
self.assertIsNone(z.area)
|
GHSA-3gh2-xw74-jmcw
|
tests/gis_tests/geoapp/tests.py
|
@@ -9,7 +9,7 @@
MultiPoint, MultiPolygon, Point, Polygon, fromstr,
)
from django.core.management import call_command
-from django.db import NotSupportedError, connection
+from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import F, OuterRef, Subquery
from django.test import TestCase, skipUnlessDBFeature
@@ -594,6 +594,42 @@ def test_unionagg(self):
qs = City.objects.filter(name='NotACity')
self.assertIsNone(qs.aggregate(Union('point'))['point__union'])
+ @unittest.skipUnless(
+ connection.vendor == 'oracle',
+ 'Oracle supports tolerance parameter.',
+ )
+ def test_unionagg_tolerance(self):
+ City.objects.create(
+ point=fromstr('POINT(-96.467222 32.751389)', srid=4326),
+ name='Forney',
+ )
+ tx = Country.objects.get(name='Texas').mpoly
+ # Tolerance is greater than distance between Forney and Dallas, that's
+ # why Dallas is ignored.
+ forney_houston = GEOSGeometry(
+ 'MULTIPOINT(-95.363151 29.763374, -96.467222 32.751389)',
+ srid=4326,
+ )
+ self.assertIs(
+ forney_houston.equals(
+ City.objects.filter(point__within=tx).aggregate(
+ Union('point', tolerance=32000),
+ )['point__union'],
+ ),
+ True,
+ )
+
+ @unittest.skipUnless(
+ connection.vendor == 'oracle',
+ 'Oracle supports tolerance parameter.',
+ )
+ def test_unionagg_tolerance_escaping(self):
+ tx = Country.objects.get(name='Texas').mpoly
+ with self.assertRaises(DatabaseError):
+ City.objects.filter(point__within=tx).aggregate(
+ Union('point', tolerance='0.05))), (((1'),
+ )
+
def test_within_subquery(self):
"""
Using a queryset inside a geo lookup is working (using a subquery)
|
import tempfile
import unittest
from io import StringIO
from django.contrib.gis import gdal
from django.contrib.gis.db.models import Extent, MakeLine, Union, functions
from django.contrib.gis.geos import (
GeometryCollection, GEOSGeometry, LinearRing, LineString, MultiLineString,
MultiPoint, MultiPolygon, Point, Polygon, fromstr,
)
from django.core.management import call_command
from django.db import NotSupportedError, connection
from django.db.models import F, OuterRef, Subquery
from django.test import TestCase, skipUnlessDBFeature
from ..utils import (
mariadb, mysql, no_oracle, oracle, postgis, skipUnlessGISLookup,
spatialite,
)
from .models import (
City, Country, Feature, MinusOneSRID, MultiFields, NonConcreteModel,
PennsylvaniaCity, State, Track,
)
class GeoModelTest(TestCase):
fixtures = ['initial']
def test_fixtures(self):
"Testing geographic model initialization from fixtures."
# Ensuring that data was loaded from initial data fixtures.
self.assertEqual(2, Country.objects.count())
self.assertEqual(8, City.objects.count())
self.assertEqual(2, State.objects.count())
def test_proxy(self):
"Testing Lazy-Geometry support (using the GeometryProxy)."
# Testing on a Point
pnt = Point(0, 0)
nullcity = City(name='NullCity', point=pnt)
nullcity.save()
# Making sure TypeError is thrown when trying to set with an
# incompatible type.
for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
with self.assertRaisesMessage(TypeError, 'Cannot set'):
nullcity.point = bad
# Now setting with a compatible GEOS Geometry, saving, and ensuring
# the save took, notice no SRID is explicitly set.
new = Point(5, 23)
nullcity.point = new
# Ensuring that the SRID is automatically set to that of the
# field after assignment, but before saving.
self.assertEqual(4326, nullcity.point.srid)
nullcity.save()
# Ensuring the point was saved correctly after saving
self.assertEqual(new, City.objects.get(name='NullCity').point)
# Setting the X and Y of the Point
nullcity.point.x = 23
nullcity.point.y = 5
# Checking assignments pre & post-save.
self.assertNotEqual(Point(23, 5, srid=4326), City.objects.get(name='NullCity').point)
nullcity.save()
self.assertEqual(Point(23, 5, srid=4326), City.objects.get(name='NullCity').point)
nullcity.delete()
# Testing on a Polygon
shell = LinearRing((0, 0), (0, 90), (100, 90), (100, 0), (0, 0))
inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
# Creating a State object using a built Polygon
ply = Polygon(shell, inner)
nullstate = State(name='NullState', poly=ply)
self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
nullstate.save()
ns = State.objects.get(name='NullState')
self.assertEqual(ply, ns.poly)
# Testing the `ogr` and `srs` lazy-geometry properties.
self.assertIsInstance(ns.poly.ogr, gdal.OGRGeometry)
self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
self.assertIsInstance(ns.poly.srs, gdal.SpatialReference)
self.assertEqual('WGS 84', ns.poly.srs.name)
# Changing the interior ring on the poly attribute.
new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
ns.poly[1] = new_inner
ply[1] = new_inner
self.assertEqual(4326, ns.poly.srid)
ns.save()
self.assertEqual(ply, State.objects.get(name='NullState').poly)
ns.delete()
@skipUnlessDBFeature("supports_transform")
def test_lookup_insert_transform(self):
"Testing automatic transform for lookups and inserts."
# San Antonio in 'WGS84' (SRID 4326)
sa_4326 = 'POINT (-98.493183 29.424170)'
wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
# San Antonio in 'WGS 84 / Pseudo-Mercator' (SRID 3857)
other_srid_pnt = wgs_pnt.transform(3857, clone=True)
# Constructing & querying with a point from a different SRID. Oracle
# `SDO_OVERLAPBDYINTERSECT` operates differently from
# `ST_Intersects`, so contains is used instead.
if oracle:
tx = Country.objects.get(mpoly__contains=other_srid_pnt)
else:
tx = Country.objects.get(mpoly__intersects=other_srid_pnt)
self.assertEqual('Texas', tx.name)
# Creating San Antonio. Remember the Alamo.
sa = City.objects.create(name='San Antonio', point=other_srid_pnt)
# Now verifying that San Antonio was transformed correctly
sa = City.objects.get(name='San Antonio')
self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
# If the GeometryField SRID is -1, then we shouldn't perform any
# transformation if the SRID of the input geometry is different.
m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
m1.save()
self.assertEqual(-1, m1.geom.srid)
def test_createnull(self):
"Testing creating a model instance and the geometry being None"
c = City()
self.assertIsNone(c.point)
def test_geometryfield(self):
"Testing the general GeometryField."
Feature(name='Point', geom=Point(1, 1)).save()
Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
Feature(name='GeometryCollection',
geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
f_1 = Feature.objects.get(name='Point')
self.assertIsInstance(f_1.geom, Point)
self.assertEqual((1.0, 1.0), f_1.geom.tuple)
f_2 = Feature.objects.get(name='LineString')
self.assertIsInstance(f_2.geom, LineString)
self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
f_3 = Feature.objects.get(name='Polygon')
self.assertIsInstance(f_3.geom, Polygon)
f_4 = Feature.objects.get(name='GeometryCollection')
self.assertIsInstance(f_4.geom, GeometryCollection)
self.assertEqual(f_3.geom, f_4.geom[2])
# TODO: fix on Oracle: ORA-22901: cannot compare nested table or VARRAY or
# LOB attributes of an object type.
@no_oracle
@skipUnlessDBFeature("supports_transform")
def test_inherited_geofields(self):
"Database functions on inherited Geometry fields."
# Creating a Pennsylvanian city.
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
# All transformation SQL will need to be performed on the
# _parent_ table.
qs = PennsylvaniaCity.objects.annotate(new_point=functions.Transform('point', srid=32128))
self.assertEqual(1, qs.count())
for pc in qs:
self.assertEqual(32128, pc.new_point.srid)
def test_raw_sql_query(self):
"Testing raw SQL query."
cities1 = City.objects.all()
point_select = connection.ops.select % 'point'
cities2 = list(City.objects.raw(
'select id, name, %s as point from geoapp_city' % point_select
))
self.assertEqual(len(cities1), len(cities2))
with self.assertNumQueries(0): # Ensure point isn't deferred.
self.assertIsInstance(cities2[0].point, Point)
def test_dumpdata_loaddata_cycle(self):
"""
Test a dumpdata/loaddata cycle with geographic data.
"""
out = StringIO()
original_data = list(City.objects.all().order_by('name'))
call_command('dumpdata', 'geoapp.City', stdout=out)
result = out.getvalue()
houston = City.objects.get(name='Houston')
self.assertIn('"point": "%s"' % houston.point.ewkt, result)
# Reload now dumped data
with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as tmp:
tmp.write(result)
tmp.seek(0)
call_command('loaddata', tmp.name, verbosity=0)
self.assertEqual(original_data, list(City.objects.all().order_by('name')))
@skipUnlessDBFeature("supports_empty_geometries")
def test_empty_geometries(self):
geometry_classes = [
Point,
LineString,
LinearRing,
Polygon,
MultiPoint,
MultiLineString,
MultiPolygon,
GeometryCollection,
]
for klass in geometry_classes:
g = klass(srid=4326)
feature = Feature.objects.create(name='Empty %s' % klass.__name__, geom=g)
feature.refresh_from_db()
if klass is LinearRing:
                # LinearRing isn't representable in WKB, so GEOSGeometry.wkb
# uses LineString instead.
g = LineString(srid=4326)
self.assertEqual(feature.geom, g)
self.assertEqual(feature.geom.srid, g.srid)
class GeoLookupTest(TestCase):
fixtures = ['initial']
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
if mysql and not mariadb and connection.mysql_version < (8, 0, 0):
raise unittest.SkipTest('MySQL < 8 gives different results.')
ptown = City.objects.get(name='Pueblo')
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual('Kansas', qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name='Texas')
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if connection.features.supports_contained_lookup:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ['Houston', 'Dallas', 'Oklahoma City']
for c in qs:
self.assertIn(c.name, cities)
# Pulling out some cities.
houston = City.objects.get(name='Houston')
wellington = City.objects.get(name='Wellington')
pueblo = City.objects.get(name='Pueblo')
okcity = City.objects.get(name='Oklahoma City')
lawrence = City.objects.get(name='Lawrence')
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
self.assertEqual('Texas', tx.name)
self.assertEqual('New Zealand', nz.name)
# Testing `contains` on the states using the point for Lawrence.
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual('Kansas', ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
# are not contained in Texas or New Zealand.
self.assertEqual(len(Country.objects.filter(mpoly__contains=pueblo.point)), 0) # Query w/GEOSGeometry object
self.assertEqual(len(Country.objects.filter(mpoly__contains=okcity.point.wkt)), 0) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if connection.features.supports_bbcontains_lookup:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual('Texas', qs[0].name)
@skipUnlessDBFeature("supports_crosses_lookup")
def test_crosses_lookup(self):
Track.objects.create(
name='Line1',
line=LineString([(-95, 29), (-60, 0)])
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 0), (-60, 29)])).count(),
1
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 30), (0, 30)])).count(),
0
)
@skipUnlessDBFeature("supports_isvalid_lookup")
def test_isvalid_lookup(self):
invalid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))')
State.objects.create(name='invalid', poly=invalid_geom)
qs = State.objects.all()
if oracle or (mysql and connection.mysql_version < (8, 0, 0)):
# Kansas has adjacent vertices with distance 6.99244813842e-12
# which is smaller than the default Oracle tolerance.
# It's invalid on MySQL < 8 also.
qs = qs.exclude(name='Kansas')
self.assertEqual(State.objects.filter(name='Kansas', poly__isvalid=False).count(), 1)
self.assertEqual(qs.filter(poly__isvalid=False).count(), 1)
self.assertEqual(qs.filter(poly__isvalid=True).count(), qs.count() - 1)
@skipUnlessDBFeature("supports_left_right_lookups")
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name='Colorado').poly
ks_border = State.objects.get(name='Kansas').poly
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = ['Houston', 'Dallas', 'Oklahoma City',
'Lawrence', 'Chicago', 'Wellington']
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# These cities should be strictly to the right of the KS border.
cities = ['Chicago', 'Wellington']
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual('Victoria', vic.name)
cities = ['Pueblo', 'Victoria']
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
@skipUnlessGISLookup("strictly_above", "strictly_below")
def test_strictly_above_below_lookups(self):
dallas = City.objects.get(name='Dallas')
self.assertQuerysetEqual(
City.objects.filter(point__strictly_above=dallas.point).order_by('name'),
['Chicago', 'Lawrence', 'Oklahoma City', 'Pueblo', 'Victoria'],
lambda b: b.name
)
self.assertQuerysetEqual(
City.objects.filter(point__strictly_below=dallas.point).order_by('name'),
['Houston', 'Wellington'],
lambda b: b.name
)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]:
self.assertEqual('Houston', c.name)
@skipUnlessDBFeature("supports_null_geometries")
def test_null_geometries(self):
"Testing NULL geometry support, and the `isnull` lookup type."
# Creating a state with a NULL boundary.
State.objects.create(name='Puerto Rico')
# Querying for both NULL and Non-NULL values.
nullqs = State.objects.filter(poly__isnull=True)
validqs = State.objects.filter(poly__isnull=False)
# Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
self.assertEqual(1, len(nullqs))
self.assertEqual('Puerto Rico', nullqs[0].name)
# GeometryField=None is an alias for __isnull=True.
self.assertCountEqual(State.objects.filter(poly=None), nullqs)
self.assertCountEqual(State.objects.exclude(poly=None), validqs)
# The valid states should be Colorado & Kansas
self.assertEqual(2, len(validqs))
state_names = [s.name for s in validqs]
self.assertIn('Colorado', state_names)
self.assertIn('Kansas', state_names)
# Saving another commonwealth w/a NULL geometry.
nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
self.assertIsNone(nmi.poly)
# Assigning a geometry and saving -- then UPDATE back to NULL.
nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
nmi.save()
State.objects.filter(name='Northern Mariana Islands').update(poly=None)
self.assertIsNone(State.objects.get(name='Northern Mariana Islands').poly)
@skipUnlessDBFeature('supports_null_geometries', 'supports_crosses_lookup', 'supports_relate_lookup')
def test_null_geometries_excluded_in_lookups(self):
"""NULL features are excluded in spatial lookup functions."""
null = State.objects.create(name='NULL', poly=None)
queries = [
('equals', Point(1, 1)),
('disjoint', Point(1, 1)),
('touches', Point(1, 1)),
('crosses', LineString((0, 0), (1, 1), (5, 5))),
('within', Point(1, 1)),
('overlaps', LineString((0, 0), (1, 1), (5, 5))),
('contains', LineString((0, 0), (1, 1), (5, 5))),
('intersects', LineString((0, 0), (1, 1), (5, 5))),
('relate', (Point(1, 1), 'T*T***FF*')),
('same_as', Point(1, 1)),
('exact', Point(1, 1)),
('coveredby', Point(1, 1)),
('covers', Point(1, 1)),
]
for lookup, geom in queries:
with self.subTest(lookup=lookup):
self.assertNotIn(null, State.objects.filter(**{'poly__%s' % lookup: geom}))
def test_wkt_string_in_lookup(self):
# Valid WKT strings don't emit error logs.
with self.assertRaisesMessage(AssertionError, 'no logs'):
with self.assertLogs('django.contrib.gis', 'ERROR'):
State.objects.filter(poly__intersects='LINESTRING(0 0, 1 1, 5 5)')
@skipUnlessDBFeature("supports_relate_lookup")
def test_relate_lookup(self):
"Testing the 'relate' lookup type."
# To make things more interesting, we will have our Texas reference point in
# different SRIDs.
pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
# Not passing in a geometry as first param raises a TypeError when
# initializing the QuerySet.
with self.assertRaises(ValueError):
Country.objects.filter(mpoly__relate=(23, 'foo'))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
qs = Country.objects.filter(mpoly__relate=bad_args)
with self.assertRaises(e):
qs.count()
# Relate works differently for the different backends.
if postgis or spatialite or mariadb:
contains_mask = 'T*T***FF*'
within_mask = 'T*F**F***'
intersects_mask = 'T********'
elif oracle:
contains_mask = 'contains'
within_mask = 'inside'
# TODO: This is not quite the same as the PostGIS mask above
intersects_mask = 'overlapbdyintersect'
# Testing contains relation mask.
if connection.features.supports_transform:
self.assertEqual(
Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name,
'Texas',
)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
# Testing within relation mask.
ks = State.objects.get(name='Kansas')
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
# Testing intersection relation mask.
if not oracle:
if connection.features.supports_transform:
self.assertEqual(
Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name,
'Texas',
)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
# With a complex geometry expression
mask = 'anyinteract' if oracle else within_mask
self.assertFalse(City.objects.exclude(point__relate=(functions.Union('point', 'point'), mask)))
def test_gis_lookups_with_complex_expressions(self):
multiple_arg_lookups = {'dwithin', 'relate'} # These lookups are tested elsewhere.
lookups = connection.ops.gis_operators.keys() - multiple_arg_lookups
self.assertTrue(lookups, 'No lookups found')
for lookup in lookups:
with self.subTest(lookup):
City.objects.filter(**{'point__' + lookup: functions.Union('point', 'point')}).exists()
def test_subquery_annotation(self):
multifields = MultiFields.objects.create(
city=City.objects.create(point=Point(1, 1)),
point=Point(2, 2),
poly=Polygon.from_bbox((0, 0, 2, 2)),
)
qs = MultiFields.objects.annotate(
city_point=Subquery(City.objects.filter(
id=OuterRef('city'),
).values('point')),
).filter(
city_point__within=F('poly'),
)
self.assertEqual(qs.get(), multifields)
class GeoQuerySetTest(TestCase):
# TODO: GeoQuerySet is removed, organize these test better.
fixtures = ['initial']
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent(self):
"""
Testing the `Extent` aggregate.
"""
# Reference query:
# `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
# => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
qs = City.objects.filter(name__in=('Houston', 'Dallas'))
extent = qs.aggregate(Extent('point'))['point__extent']
for val, exp in zip(extent, expected):
self.assertAlmostEqual(exp, val, 4)
self.assertIsNone(City.objects.filter(name=('Smalltown')).aggregate(Extent('point'))['point__extent'])
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent_with_limit(self):
"""
Testing if extent supports limit.
"""
extent1 = City.objects.all().aggregate(Extent('point'))['point__extent']
extent2 = City.objects.all()[:3].aggregate(Extent('point'))['point__extent']
self.assertNotEqual(extent1, extent2)
def test_make_line(self):
"""
Testing the `MakeLine` aggregate.
"""
if not connection.features.supports_make_line_aggr:
with self.assertRaises(NotSupportedError):
City.objects.all().aggregate(MakeLine('point'))
return
# MakeLine on an inappropriate field returns simply None
self.assertIsNone(State.objects.aggregate(MakeLine('poly'))['poly__makeline'])
# Reference query:
# SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
ref_line = GEOSGeometry(
'LINESTRING(-95.363151 29.763374,-96.801611 32.782057,'
'-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,'
'-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)',
srid=4326
)
        # We check for equality with a tolerance of 10e-5, which is a lower bound
        # on the precision of the ref_line coordinates.
line = City.objects.aggregate(MakeLine('point'))['point__makeline']
self.assertTrue(
ref_line.equals_exact(line, tolerance=10e-5),
"%s != %s" % (ref_line, line)
)
@skipUnlessDBFeature('supports_union_aggr')
def test_unionagg(self):
"""
Testing the `Union` aggregate.
"""
tx = Country.objects.get(name='Texas').mpoly
# Houston, Dallas -- Ordering may differ depending on backend or GEOS version.
union = GEOSGeometry('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
qs = City.objects.filter(point__within=tx)
with self.assertRaises(ValueError):
qs.aggregate(Union('name'))
# Using `field_name` keyword argument in one query and specifying an
# order in the other (which should not be used because this is
# an aggregate method on a spatial column)
u1 = qs.aggregate(Union('point'))['point__union']
u2 = qs.order_by('name').aggregate(Union('point'))['point__union']
self.assertTrue(union.equals(u1))
self.assertTrue(union.equals(u2))
qs = City.objects.filter(name='NotACity')
self.assertIsNone(qs.aggregate(Union('point'))['point__union'])
def test_within_subquery(self):
"""
Using a queryset inside a geo lookup is working (using a subquery)
(#14483).
"""
tex_cities = City.objects.filter(
point__within=Country.objects.filter(name='Texas').values('mpoly')).order_by('name')
self.assertEqual(list(tex_cities.values_list('name', flat=True)), ['Dallas', 'Houston'])
def test_non_concrete_field(self):
NonConcreteModel.objects.create(point=Point(0, 0), name='name')
list(NonConcreteModel.objects.all())
def test_values_srid(self):
for c, v in zip(City.objects.all(), City.objects.values()):
self.assertEqual(c.point.srid, v['point'].srid)
|
GHSA-3gh2-xw74-jmcw
|
cms/admin/pageadmin.py
|
@@ -928,6 +928,7 @@ def change_template(self, request, object_id):
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse(force_unicode(_("The template was successfully changed")))
+ @require_POST
@wrap_transaction
def move_page(self, request, page_id, extra_context=None):
"""
@@ -1013,6 +1014,7 @@ def copy_language(self, request, page_id):
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse("ok")
+ @require_POST
@wrap_transaction
def copy_page(self, request, page_id, extra_context=None):
"""
@@ -1046,6 +1048,7 @@ def copy_page(self, request, page_id, extra_context=None):
context.update(extra_context or {})
return HttpResponseRedirect('../../')
+ @require_POST
@wrap_transaction
@create_revision()
def publish_page(self, request, page_id, language):
@@ -1146,6 +1149,7 @@ def cleanup_history(self, page, publish=False):
revision.delete()
deleted.append(revision.pk)
+ @require_POST
@wrap_transaction
def unpublish(self, request, page_id, language):
"""
@@ -1181,6 +1185,7 @@ def unpublish(self, request, page_id, language):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
return HttpResponseRedirect(path)
+ @require_POST
@wrap_transaction
def revert_page(self, request, page_id, language):
page = get_object_or_404(Page, id=page_id)
@@ -1316,6 +1321,7 @@ def preview_page(self, request, object_id, language):
page.site.domain, url)
return HttpResponseRedirect(url)
+ @require_POST
def change_innavigation(self, request, page_id):
"""
Switch the in_navigation of a page
|
# -*- coding: utf-8 -*-
import copy
from functools import wraps
import json
import sys
import django
from django.contrib.admin.helpers import AdminForm
from django.conf import settings
from django.contrib import admin, messages
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import get_deleted_objects
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site, get_current_site
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist, ValidationError
from django.db import router
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from django.template.defaultfilters import escape
from django.utils.translation import ugettext_lazy as _, get_language
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from cms.admin.change_list import CMSChangeList
from cms.admin.dialog.views import get_copy_dialog
from cms.admin.forms import (PageForm, AdvancedSettingsForm, PagePermissionForm,
PublicationDatesForm)
from cms.admin.permissionadmin import (PERMISSION_ADMIN_INLINES, PagePermissionInlineAdmin, ViewRestrictionInlineAdmin)
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from cms.admin.views import revert_plugins
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_PENDING
from cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, StaticPlaceholder
from cms.models.managers import PagePermissionsPermissionManager
from cms.plugin_pool import plugin_pool
from cms.toolbar_pool import toolbar_pool
from cms.utils import helpers, permissions, get_language_from_request, admin as admin_utils, copy_plugins
from cms.utils.i18n import get_language_list, get_language_tuple, get_language_object, force_language
from cms.utils.admin import jsonify_request
from cms.utils.compat import DJANGO_1_4
from cms.utils.compat.dj import force_unicode, is_installed
from cms.utils.compat.urls import unquote
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import find_placeholder_relation
from cms.utils.permissions import has_global_page_permission, has_generic_permission
from cms.utils.plugins import current_site
from cms.utils.transaction import wrap_transaction
from cms.utils.urlutils import add_url_parameters, admin_reverse
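# require_POST is a function-view decorator; wrapping it in method_decorator
# adapts it so it can be applied to the ModelAdmin view methods defined below.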
require_POST = method_decorator(require_POST)
if is_installed('reversion'):
from reversion.admin import VersionAdmin as ModelAdmin
from reversion import create_revision
else: # pragma: no cover
from django.contrib.admin import ModelAdmin
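    # Minimal stand-in used when django-reversion is not installed:
    # create_revision() then returns a no-op object that works both as a
    # context manager and as a decorator.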
class ReversionContext(object):
def __enter__(self):
yield
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __call__(self, func):
"""Allows this revision context to be used as a decorator."""
@wraps(func)
def do_revision_context(*args, **kwargs):
self.__enter__()
exception = False
try:
try:
return func(*args, **kwargs)
except:
exception = True
if not self.__exit__(*sys.exc_info()):
raise
finally:
if not exception:
self.__exit__(None, None, None)
return do_revision_context
def create_revision():
return ReversionContext()
PUBLISH_COMMENT = "Publish"
INITIAL_COMMENT = "Initial version."
class PageAdmin(PlaceholderAdminMixin, ModelAdmin):
form = PageForm
search_fields = ('=id', 'title_set__slug', 'title_set__title', 'reverse_id')
revision_form_template = "admin/cms/page/history/revision_header.html"
recover_form_template = "admin/cms/page/history/recover_header.html"
add_general_fields = ['title', 'slug', 'language', 'template']
change_list_template = "admin/cms/page/tree/base.html"
list_filter = ['in_navigation', 'template', 'changed_by', 'soft_root']
title_frontend_editable_fields = ['title', 'menu_title', 'page_title']
inlines = PERMISSION_ADMIN_INLINES
def get_urls(self):
"""Get the admin urls
"""
from django.conf.urls import patterns, url
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = patterns(
'',
pat(r'^([0-9]+)/advanced-settings/$', self.advanced),
pat(r'^([0-9]+)/dates/$', self.dates),
pat(r'^([0-9]+)/permission-settings/$', self.permissions),
pat(r'^([0-9]+)/delete-translation/$', self.delete_translation),
pat(r'^([0-9]+)/move-page/$', self.move_page),
pat(r'^([0-9]+)/copy-page/$', self.copy_page),
pat(r'^([0-9]+)/copy-language/$', self.copy_language),
pat(r'^([0-9]+)/dialog/copy/$', get_copy_dialog), # copy dialog
pat(r'^([0-9]+)/change-navigation/$', self.change_innavigation),
pat(r'^([0-9]+)/jsi18n/$', self.redirect_jsi18n),
pat(r'^([0-9]+)/permissions/$', self.get_permissions),
pat(r'^([0-9]+)/undo/$', self.undo),
pat(r'^([0-9]+)/redo/$', self.redo),
pat(r'^([0-9]+)/change_template/$', self.change_template),
pat(r'^([0-9]+)/([a-z\-]+)/descendants/$', self.descendants), # menu html for page descendants
pat(r'^([0-9]+)/([a-z\-]+)/edit-field/$', self.edit_title_fields),
pat(r'^([0-9]+)/([a-z\-]+)/publish/$', self.publish_page),
pat(r'^([0-9]+)/([a-z\-]+)/unpublish/$', self.unpublish),
pat(r'^([0-9]+)/([a-z\-]+)/revert/$', self.revert_page),
pat(r'^([0-9]+)/([a-z\-]+)/preview/$', self.preview_page),
pat(r'^add-page-type/$', self.add_page_type),
pat(r'^published-pages/$', self.get_published_pagelist),
url(r'^resolve/$', self.resolve, name="cms_page_resolve"),
)
if plugin_pool.get_all_plugins():
url_patterns += plugin_pool.get_patterns()
url_patterns += super(PageAdmin, self).get_urls()
return url_patterns
def redirect_jsi18n(self, request):
return HttpResponseRedirect(admin_reverse('jsi18n'))
def get_revision_instances(self, request, object):
"""Returns all the instances to be used in the object's revision."""
if isinstance(object, Title):
object = object.page
if isinstance(object, Page) and not object.publisher_is_draft:
object = object.publisher_public
placeholder_relation = find_placeholder_relation(object)
data = [object]
filters = {'placeholder__%s' % placeholder_relation: object}
for plugin in CMSPlugin.objects.filter(**filters):
data.append(plugin)
plugin_instance, admin = plugin.get_plugin_instance()
if plugin_instance:
data.append(plugin_instance)
if isinstance(object, Page):
titles = object.title_set.all()
for title in titles:
title.publisher_public = None
data.append(title)
return data
def save_model(self, request, obj, form, change):
"""
Move the page in the tree if necessary and save every placeholder
Content object.
"""
target = request.GET.get('target', None)
position = request.GET.get('position', None)
if 'recover' in request.path_info:
pk = obj.pk
if obj.parent_id:
parent = Page.objects.get(pk=obj.parent_id)
else:
parent = None
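            # Reset the MPTT tree fields and temporarily clear the primary key
            # so insert_at() places the recovered page as a new node; the
            # original pk is then restored and the page saved without signals.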
obj.lft = 0
obj.rght = 0
obj.tree_id = 0
obj.level = 0
obj.pk = None
obj.insert_at(parent, save=False)
obj.pk = pk
obj.save(no_signals=True)
else:
if 'history' in request.path_info:
old_obj = Page.objects.get(pk=obj.pk)
obj.level = old_obj.level
obj.parent_id = old_obj.parent_id
obj.rght = old_obj.rght
obj.lft = old_obj.lft
obj.tree_id = old_obj.tree_id
new = False
if not obj.pk:
new = True
obj.save()
if 'recover' in request.path_info or 'history' in request.path_info:
revert_plugins(request, obj.version.pk, obj)
if target is not None and position is not None:
try:
target = self.model.objects.get(pk=target)
except self.model.DoesNotExist:
pass
else:
obj.move_to(target, position)
page_type_id = form.cleaned_data.get('page_type')
copy_target_id = request.GET.get('copy_target')
if copy_target_id or page_type_id:
if page_type_id:
copy_target_id = page_type_id
copy_target = Page.objects.get(pk=copy_target_id)
if not copy_target.has_view_permission(request):
raise PermissionDenied()
obj = Page.objects.get(pk=obj.pk) #mptt reload
copy_target._copy_attributes(obj, clean=True)
obj.save()
for lang in copy_target.languages.split(','):
copy_target._copy_contents(obj, lang)
if not 'permission' in request.path_info:
language = form.cleaned_data['language']
Title.objects.set_or_create(
request,
obj,
form,
language,
)
# is it home? publish it right away
if new and Page.objects.filter(site_id=obj.site_id).count() == 1:
obj.publish(language)
def get_fieldsets(self, request, obj=None):
form = self.get_form(request, obj, fields=None)
if getattr(form, 'fieldsets', None) is None:
fields = list(form.base_fields) + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': fields})]
else:
return form.fieldsets
def get_inline_classes(self, request, obj=None, **kwargs):
if obj and 'permission' in request.path_info:
return PERMISSION_ADMIN_INLINES
return []
def get_form_class(self, request, obj=None, **kwargs):
if 'advanced' in request.path_info:
return AdvancedSettingsForm
elif 'permission' in request.path_info:
return PagePermissionForm
elif 'dates' in request.path_info:
return PublicationDatesForm
return self.form
def get_form(self, request, obj=None, **kwargs):
"""
Get PageForm for the Page model and modify its fields depending on
the request.
"""
language = get_language_from_request(request, obj)
form_cls = self.get_form_class(request, obj)
form = super(PageAdmin, self).get_form(request, obj, form=form_cls, **kwargs)
        # get_form operates by overriding the initial field values, which may
        # persist across invocations. The code below deep-copies the field
        # definitions to avoid such leaks.
for field in form.base_fields.keys():
form.base_fields[field] = copy.deepcopy(form.base_fields[field])
if 'language' in form.base_fields:
form.base_fields['language'].initial = language
if 'page_type' in form.base_fields:
if 'copy_target' in request.GET or 'add_page_type' in request.GET or obj:
del form.base_fields['page_type']
elif not Title.objects.filter(page__parent__reverse_id=PAGE_TYPES_ID, language=language).exists():
del form.base_fields['page_type']
if 'add_page_type' in request.GET:
del form.base_fields['menu_title']
del form.base_fields['meta_description']
del form.base_fields['page_title']
self.inlines = self.get_inline_classes(request, obj, **kwargs)
if obj:
if 'history' in request.path_info or 'recover' in request.path_info:
version_id = request.path_info.split('/')[-2]
else:
version_id = None
title_obj = obj.get_title_obj(language=language, fallback=False, version_id=version_id, force_reload=True)
if 'site' in form.base_fields and form.base_fields['site'].initial is None:
form.base_fields['site'].initial = obj.site
for name in ('slug', 'title', 'meta_description', 'menu_title', 'page_title', 'redirect'):
if name in form.base_fields:
form.base_fields[name].initial = getattr(title_obj, name)
if 'overwrite_url' in form.base_fields:
if title_obj.has_url_overwrite:
form.base_fields['overwrite_url'].initial = title_obj.path
else:
form.base_fields['overwrite_url'].initial = ''
else:
for name in ('slug', 'title'):
form.base_fields[name].initial = u''
if 'target' in request.GET or 'copy_target' in request.GET:
target = request.GET.get('copy_target') or request.GET.get('target')
if 'position' in request.GET:
position = request.GET['position']
if position == 'last-child' or position == 'first-child':
form.base_fields['parent'].initial = request.GET.get('target', None)
else:
sibling = Page.objects.get(pk=target)
form.base_fields['parent'].initial = sibling.parent_id
else:
form.base_fields['parent'].initial = request.GET.get('target', None)
form.base_fields['site'].initial = request.session.get('cms_admin_site', None)
return form
def advanced(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_advanced_settings_permission(request):
raise PermissionDenied("No permission for editing advanced settings")
return self.change_view(request, object_id, extra_context={'advanced_settings': True, 'title': _("Advanced Settings")})
def dates(self, request, object_id):
return self.change_view(request, object_id, extra_context={'publishing_dates': True, 'title': _("Publishing dates")})
def permissions(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_change_permissions_permission(request):
raise PermissionDenied("No permission for editing advanced settings")
return self.change_view(request, object_id, extra_context={'show_permissions': True, 'title': _("Change Permissions")})
def get_inline_instances(self, request, obj=None):
if DJANGO_1_4:
inlines = super(PageAdmin, self).get_inline_instances(request)
if hasattr(self, '_current_page'):
obj = self._current_page
else:
inlines = super(PageAdmin, self).get_inline_instances(request, obj)
if get_cms_setting('PERMISSION') and obj:
filtered_inlines = []
for inline in inlines:
if (isinstance(inline, PagePermissionInlineAdmin)
and not isinstance(inline, ViewRestrictionInlineAdmin)):
if "recover" in request.path or "history" in request.path:
# do not display permissions in recover mode
continue
if not obj.has_change_permissions_permission(request):
continue
filtered_inlines.append(inline)
inlines = filtered_inlines
return inlines
def get_unihandecode_context(self, language):
if language[:2] in get_cms_setting('UNIHANDECODE_DECODERS'):
uhd_lang = language[:2]
else:
uhd_lang = get_cms_setting('UNIHANDECODE_DEFAULT_DECODER')
uhd_host = get_cms_setting('UNIHANDECODE_HOST')
uhd_version = get_cms_setting('UNIHANDECODE_VERSION')
if uhd_lang and uhd_host and uhd_version:
uhd_urls = [
'%sunihandecode-%s.core.min.js' % (uhd_host, uhd_version),
'%sunihandecode-%s.%s.min.js' % (uhd_host, uhd_version, uhd_lang),
]
else:
uhd_urls = []
return {'unihandecode_lang': uhd_lang, 'unihandecode_urls': uhd_urls}
def add_view(self, request, form_url='', extra_context=None):
extra_context = extra_context or {}
language = get_language_from_request(request)
extra_context.update({
'language': language,
})
if not request.GET.get('add_page_type') is None:
extra_context.update({
'add_page_type': True,
'title': _("Add Page Type"),
})
elif 'copy_target' in request.GET:
extra_context.update({
'title': _("Add Page Copy"),
})
else:
extra_context = self.update_language_tab_context(request, context=extra_context)
extra_context.update(self.get_unihandecode_context(language))
return super(PageAdmin, self).add_view(request, form_url, extra_context=extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
"""
The 'change' admin view for the Page model.
"""
if extra_context is None:
extra_context = {'basic_info': True}
try:
obj = self.model.objects.get(pk=object_id)
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
else:
#activate(user_lang_set)
context = {
'page': obj,
'CMS_PERMISSION': get_cms_setting('PERMISSION'),
'ADMIN_MEDIA_URL': settings.STATIC_URL,
'can_change': obj.has_change_permission(request),
'can_change_permissions': obj.has_change_permissions_permission(request),
'current_site_id': settings.SITE_ID,
}
context.update(extra_context or {})
extra_context = self.update_language_tab_context(request, obj, context)
tab_language = get_language_from_request(request)
extra_context.update(self.get_unihandecode_context(tab_language))
# get_inline_instances will need access to 'obj' so that it can
# determine if current user has enough rights to see PagePermissionInlineAdmin
# because in django versions <1.5 get_inline_instances doesn't receive 'obj'
# as a parameter, the workaround is to set it as an attribute...
if DJANGO_1_4:
self._current_page = obj
response = super(PageAdmin, self).change_view(
request, object_id, form_url=form_url, extra_context=extra_context)
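        # If the parent change_view redirected back to this same page, keep the
        # active language tab by appending ?language=<code> to the redirect's
        # Location header (older Django keeps headers in the private _headers dict).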
if tab_language and response.status_code == 302 and response._headers['location'][1] == request.path_info:
location = response._headers['location']
response._headers['location'] = (location[0], "%s?language=%s" % (location[1], tab_language))
return response
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
# add context variables
filled_languages = []
if obj:
filled_languages = [t[0] for t in obj.title_set.filter(title__isnull=False).values_list('language')]
allowed_languages = [lang[0] for lang in self._get_site_languages(obj)]
context.update({
'filled_languages': [lang for lang in filled_languages if lang in allowed_languages],
})
return super(PageAdmin, self).render_change_form(request, context, add, change, form_url, obj)
def _get_site_languages(self, obj=None):
site_id = None
if obj:
site_id = obj.site_id
else:
site_id = Site.objects.get_current().pk
return get_language_tuple(site_id)
def update_language_tab_context(self, request, obj=None, context=None):
if not context:
context = {}
language = get_language_from_request(request, obj)
languages = self._get_site_languages(obj)
context.update({
'language': language,
'language_tabs': languages,
# Dates are not language dependent, thus we hide the language
# selection bar: the language is forced through the form class
'show_language_tabs': len(list(languages)) > 1 and not context.get('publishing_dates', False),
})
return context
def response_change(self, request, obj):
"""Called always when page gets changed, call save on page, there may be
some new stuff, which should be published after all other objects on page
are collected.
"""
# save the object again, so all the related changes to page model
# can be published if required
obj.save()
return super(PageAdmin, self).response_change(request, obj)
def has_add_permission(self, request):
"""
Return true if the current user has permission to add a new page.
"""
if get_cms_setting('PERMISSION'):
return permissions.has_page_add_permission(request)
return super(PageAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
if get_cms_setting('PERMISSION'):
if obj:
return obj.has_change_permission(request)
else:
return permissions.has_page_change_permission(request)
return super(PageAdmin, self).has_change_permission(request, obj)
def has_delete_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to delete the given
        Django model instance. If CMS_PERMISSION is in use, object-level
        permissions are also checked.
"""
if get_cms_setting('PERMISSION') and obj is not None:
return obj.has_delete_permission(request)
return super(PageAdmin, self).has_delete_permission(request, obj)
def has_recover_permission(self, request):
"""
        Returns True if the user has the right to recover pages
"""
if not is_installed('reversion'):
return False
user = request.user
if user.is_superuser:
return True
try:
if has_global_page_permission(request, can_recover_page=True):
return True
except:
pass
return False
def has_add_plugin_permission(self, request, placeholder, plugin_type):
if not permissions.has_plugin_permission(request.user, plugin_type, "add"):
return False
page = placeholder.page
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
return True
def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
source_page = source_placeholder.page
if source_page and not source_page.has_change_permission(request):
return False
target_page = target_placeholder.page
if target_page and not target_page.has_change_permission(request):
return False
if target_page and not target_page.publisher_is_draft:
return False
for plugin in plugins:
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "add"):
return False
return True
def has_change_plugin_permission(self, request, plugin):
page = plugin.placeholder.page if plugin.placeholder else None
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
return True
def has_move_plugin_permission(self, request, plugin, target_placeholder):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
page = plugin.placeholder.page
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
return True
def has_delete_plugin_permission(self, request, plugin):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete"):
return False
page = plugin.placeholder.page
if page:
if not page.publisher_is_draft:
return False
if not page.has_change_permission(request):
return False
return True
def has_clear_placeholder_permission(self, request, placeholder):
page = placeholder.page if placeholder else None
if page:
if not page.publisher_is_draft:
return False
if not page.has_change_permission(request):
return False
return True
def post_add_plugin(self, request, placeholder, plugin):
if is_installed('reversion') and placeholder.page:
plugin_name = force_unicode(plugin_pool.get_plugin(plugin.plugin_type).name)
message = _(u"%(plugin_name)s plugin added to %(placeholder)s") % {
'plugin_name': plugin_name, 'placeholder': placeholder}
self.cleanup_history(placeholder.page)
helpers.make_revision_with_plugins(placeholder.page, request.user, message)
def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
page = target_placeholder.page
if page and is_installed('reversion'):
message = _(u"Copied plugins to %(placeholder)s") % {'placeholder': target_placeholder}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
def post_edit_plugin(self, request, plugin):
page = plugin.placeholder.page
if page:
# if reversion is installed, save version of the page plugins
if is_installed('reversion') and page:
plugin_name = force_unicode(plugin_pool.get_plugin(plugin.plugin_type).name)
message = _(
u"%(plugin_name)s plugin edited at position %(position)s in %(placeholder)s") % {
'plugin_name': plugin_name,
'position': plugin.position,
'placeholder': plugin.placeholder.slot
}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
def post_move_plugin(self, request, source_placeholder, target_placeholder, plugin):
page = target_placeholder.page
if page and is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, _(u"Plugins were moved"))
def post_delete_plugin(self, request, plugin):
plugin_name = force_unicode(plugin_pool.get_plugin(plugin.plugin_type).name)
page = plugin.placeholder.page
if page:
page.save()
comment = _("%(plugin_name)s plugin at position %(position)s in %(placeholder)s was deleted.") % {
'plugin_name': plugin_name,
'position': plugin.position,
'placeholder': plugin.placeholder,
}
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, comment)
def post_clear_placeholder(self, request, placeholder):
page = placeholder.page
if page:
page.save()
comment = _('All plugins in the placeholder "%(name)s" were deleted.') % {
'name': force_unicode(placeholder)
}
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, comment)
def get_placeholder_template(self, request, placeholder):
page = placeholder.page
if page:
return page.get_template()
def changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
return HttpResponseForbidden(force_unicode(_("You do not have permission to change pages.")))
try:
cl = CMSChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in request.GET.keys():
return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path_info + '?' + ERROR_FLAG + '=1')
cl.set_items(request)
site_id = request.GET.get('site__exact', None)
if site_id is None:
site_id = current_site(request).pk
site_id = int(site_id)
# languages
languages = get_language_list(site_id)
        # Parse the cookie that records which page trees have already been
        # opened and extract the page IDs.
djangocms_nodes_open = request.COOKIES.get('djangocms_nodes_open', '')
raw_nodes = unquote(djangocms_nodes_open).split(',')
try:
open_menu_trees = [int(c.split('page_', 1)[1]) for c in raw_nodes]
except IndexError:
open_menu_trees = []
# Language may be present in the GET dictionary but empty
language = request.GET.get('language', get_language())
if not language:
language = get_language()
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'opts': opts,
'has_add_permission': self.has_add_permission(request),
'root_path': admin_reverse('index'),
'app_label': app_label,
'preview_language': language,
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'CMS_PERMISSION': get_cms_setting('PERMISSION'),
'DEBUG': settings.DEBUG,
'site_languages': languages,
'open_menu_trees': open_menu_trees,
}
if is_installed('reversion'):
context['has_recover_permission'] = self.has_recover_permission(request)
context['has_change_permission'] = self.has_change_permission(request)
context.update(extra_context or {})
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context, context_instance=RequestContext(request))
def recoverlist_view(self, request, extra_context=None):
if not self.has_recover_permission(request):
raise PermissionDenied
return super(PageAdmin, self).recoverlist_view(request, extra_context)
def recover_view(self, request, version_id, extra_context=None):
if not self.has_recover_permission(request):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
return super(PageAdmin, self).recover_view(request, version_id, extra_context)
def revision_view(self, request, object_id, version_id, extra_context=None):
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
response = super(PageAdmin, self).revision_view(request, object_id, version_id, extra_context)
return response
def history_view(self, request, object_id, extra_context=None):
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
return super(PageAdmin, self).history_view(request, object_id, extra_context)
def render_revision_form(self, request, obj, version, context, revert=False, recover=False):
# reset parent to null if parent is not found
if version.field_dict['parent']:
try:
Page.objects.get(pk=version.field_dict['parent'])
except:
if revert and obj.parent_id != int(version.field_dict['parent']):
version.field_dict['parent'] = obj.parent_id
if recover:
obj.parent = None
obj.parent_id = None
version.field_dict['parent'] = None
obj.version = version
return super(PageAdmin, self).render_revision_form(request, obj, version, context, revert, recover)
@require_POST
def undo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
from reversion.models import Revision
from cms.utils.page_resolver import is_valid_url
import reversion
page = get_object_or_404(Page, pk=object_id)
old_titles = list(page.title_set.all())
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_unicode(_("You do not have permission to change this page")))
versions = reversion.get_for_object(page)
if page.revision_id:
current_revision = Revision.objects.get(pk=page.revision_id)
else:
try:
current_version = versions[0]
except IndexError:
return HttpResponseBadRequest("no current revision found")
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__lt=current_revision.pk)[0]
except IndexError:
return HttpResponseBadRequest("no previous revision found")
previous_revision = previous_version.revision
# clear all plugins
placeholders = page.placeholders.all()
placeholder_ids = []
for placeholder in placeholders:
placeholder_ids.append(placeholder.pk)
plugins = CMSPlugin.objects.filter(placeholder__in=placeholder_ids).order_by('-level')
for plugin in plugins:
plugin._no_reorder = True
plugin.delete()
# TODO: delete placeholders instead of finding duplicates for 3.1
#page.placeholders.all().delete()
previous_revision.revert(True)
rev_page = get_object_or_404(Page, pk=page.pk)
rev_page.revision_id = previous_revision.pk
rev_page.publisher_public_id = page.publisher_public_id
rev_page.save()
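        # The pre-revert placeholders are still attached, so the reverted page
        # may end up with two placeholders for the same slot; keep one per slot
        # and delete the duplicate that belonged to the old set.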
new_placeholders = rev_page.placeholders.all()
slots = {}
for new_ph in new_placeholders:
if not new_ph.slot in slots:
slots[new_ph.slot] = new_ph
else:
if new_ph in placeholder_ids:
new_ph.delete()
elif slots[new_ph.slot] in placeholder_ids:
slots[new_ph.slot].delete()
new_titles = rev_page.title_set.all()
for title in new_titles:
try:
is_valid_url(title.path, rev_page)
except ValidationError:
for old_title in old_titles:
if old_title.language == title.language:
title.slug = old_title.slug
title.save()
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
return HttpResponse("ok")
@require_POST
def redo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
from reversion.models import Revision
import reversion
from cms.utils.page_resolver import is_valid_url
page = get_object_or_404(Page, pk=object_id)
old_titles = list(page.title_set.all())
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_unicode(_("You do not have permission to change this page")))
versions = reversion.get_for_object(page)
if page.revision_id:
current_revision = Revision.objects.get(pk=page.revision_id)
else:
try:
current_version = versions[0]
except IndexError:
return HttpResponseBadRequest("no current revision found")
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__gt=current_revision.pk).order_by('pk')[0]
except IndexError:
return HttpResponseBadRequest("no next revision found")
next_revision = previous_version.revision
# clear all plugins
placeholders = page.placeholders.all()
placeholder_ids = []
for placeholder in placeholders:
placeholder_ids.append(placeholder.pk)
plugins = CMSPlugin.objects.filter(placeholder__in=placeholder_ids).order_by('-level')
for plugin in plugins:
plugin._no_reorder = True
plugin.delete()
# TODO: 3.1 remove the placeholder matching from below and just delete them
#page.placeholders.all().delete()
next_revision.revert(True)
rev_page = get_object_or_404(Page, pk=page.pk)
rev_page.revision_id = next_revision.pk
rev_page.publisher_public_id = page.publisher_public_id
rev_page.save()
new_placeholders = rev_page.placeholders.all()
slots = {}
for new_ph in new_placeholders:
if not new_ph.slot in slots:
slots[new_ph.slot] = new_ph
else:
if new_ph in placeholder_ids:
new_ph.delete()
elif slots[new_ph.slot] in placeholder_ids:
slots[new_ph.slot].delete()
new_titles = rev_page.title_set.all()
for title in new_titles:
try:
is_valid_url(title.path, rev_page)
except ValidationError:
for old_title in old_titles:
if old_title.language == title.language:
title.slug = old_title.slug
title.save()
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
return HttpResponse("ok")
@require_POST
@create_revision()
def change_template(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_change_permission(request):
return HttpResponseForbidden(force_unicode(_("You do not have permission to change the template")))
to_template = request.POST.get("template", None)
if to_template not in dict(get_cms_setting('TEMPLATES')):
return HttpResponseBadRequest(force_unicode(_("Template not valid")))
page.template = to_template
page.save()
if is_installed('reversion'):
message = _("Template changed to %s") % dict(get_cms_setting('TEMPLATES'))[to_template]
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse(force_unicode(_("The template was successfully changed")))
@wrap_transaction
def move_page(self, request, page_id, extra_context=None):
"""
Move the page to the requested target, at the given position
"""
target = request.POST.get('target', None)
position = request.POST.get('position', None)
if target is None or position is None:
return HttpResponseRedirect('../../')
try:
page = self.model.objects.get(pk=page_id)
target = self.model.objects.get(pk=target)
except self.model.DoesNotExist:
return jsonify_request(HttpResponseBadRequest("error"))
        # Does the user have permission to do this?
if not page.has_move_page_permission(request) or \
not target.has_add_permission(request):
return jsonify_request(
HttpResponseForbidden(force_unicode(_("Error! You don't have permissions to move this page. Please reload the page"))))
# move page
page.move_page(target, position)
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, _("Page moved"))
return jsonify_request(HttpResponse(admin_utils.render_admin_menu_item(request, page).content))
def get_permissions(self, request, page_id):
page = get_object_or_404(Page, id=page_id)
can_change_list = Page.permissions.get_change_id_list(request.user, page.site_id)
global_page_permissions = GlobalPagePermission.objects.filter(sites__in=[page.site_id])
page_permissions = PagePermission.objects.for_page(page)
all_permissions = list(global_page_permissions) + list(page_permissions)
        # Can the user change global permissions?
has_global = permissions.has_global_change_permissions_permission(request)
permission_set = []
for permission in all_permissions:
if isinstance(permission, GlobalPagePermission):
if has_global:
permission_set.append([(True, True), permission])
else:
permission_set.append([(True, False), permission])
else:
if can_change_list == PagePermissionsPermissionManager.GRANT_ALL:
can_change = True
else:
can_change = permission.page_id in can_change_list
permission_set.append([(False, can_change), permission])
context = {
'page': page,
'permission_set': permission_set,
}
return render_to_response('admin/cms/page/permissions.html', context)
@require_POST
@wrap_transaction
def copy_language(self, request, page_id):
with create_revision():
source_language = request.POST.get('source_language')
target_language = request.POST.get('target_language')
page = Page.objects.get(pk=page_id)
placeholders = page.placeholders.all()
if not target_language or not target_language in get_language_list():
return HttpResponseBadRequest(force_unicode(_("Language must be set to a supported language!")))
for placeholder in placeholders:
plugins = list(
placeholder.cmsplugin_set.filter(language=source_language).order_by('tree_id', 'level', 'position'))
if not self.has_copy_plugin_permission(request, placeholder, placeholder, plugins):
return HttpResponseForbidden(force_unicode(_('You do not have permission to copy these plugins.')))
copy_plugins.copy_plugins_to(plugins, placeholder, target_language)
if page and is_installed('reversion'):
message = _(u"Copied plugins from %(source_language)s to %(target_language)s") % {
'source_language': source_language, 'target_language': target_language}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse("ok")
@wrap_transaction
def copy_page(self, request, page_id, extra_context=None):
"""
Copy the page and all its plugins and descendants to the requested target, at the given position
"""
context = {}
page = Page.objects.get(pk=page_id)
target = request.POST.get('target', None)
position = request.POST.get('position', None)
site = request.POST.get('site', None)
if target is not None and position is not None and site is not None:
try:
target = self.model.objects.get(pk=target)
                # Does the user have permission to copy this page under the target?
assert target.has_add_permission(request)
site = Site.objects.get(pk=site)
except (ObjectDoesNotExist, AssertionError):
return HttpResponse("error")
#context.update({'error': _('Page could not been moved.')})
else:
try:
kwargs = {
'copy_permissions': request.REQUEST.get('copy_permissions', False),
}
page.copy_page(target, site, position, **kwargs)
return jsonify_request(HttpResponse("ok"))
except ValidationError:
exc = sys.exc_info()[1]
return jsonify_request(HttpResponseBadRequest(exc.messages))
context.update(extra_context or {})
return HttpResponseRedirect('../../')
@wrap_transaction
@create_revision()
def publish_page(self, request, page_id, language):
try:
page = Page.objects.get(id=page_id, publisher_is_draft=True)
except Page.DoesNotExist:
page = None
# ensure user has permissions to publish this page
all_published = True
if page:
if not page.has_publish_permission(request):
return HttpResponseForbidden(force_unicode(_("You do not have permission to publish this page")))
published = page.publish(language)
if not published:
all_published = False
statics = request.GET.get('statics', '')
if not statics and not page:
            raise Http404("No page or stack found for publishing.")
if statics:
            static_ids = statics.split(',')
for pk in static_ids:
static_placeholder = StaticPlaceholder.objects.get(pk=pk)
published = static_placeholder.publish(request, language)
if not published:
all_published = False
if page:
if all_published:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
messages.warning(request, _("Page not published! A parent page is not published yet."))
else:
messages.info(request, _('The content was successfully published.'))
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(language),
action_flag=CHANGE,
)
else:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
messages.warning(request, _("Page not published! A parent page is not published yet."))
else:
messages.warning(request, _("There was a problem publishing your content"))
if is_installed('reversion') and page:
self.cleanup_history(page, publish=True)
helpers.make_revision_with_plugins(page, request.user, PUBLISH_COMMENT)
# create a new publish reversion
if 'node' in request.REQUEST:
# if request comes from tree..
return admin_utils.render_admin_menu_item(request, page)
if 'redirect' in request.GET:
return HttpResponseRedirect(request.GET['redirect'])
referrer = request.META.get('HTTP_REFERER', '')
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
if admin_reverse('index') not in referrer:
if all_published:
if page:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
path = page.get_absolute_url(language, fallback=True)
else:
public_page = Page.objects.get(publisher_public=page.pk)
path = '%s?%s' % (public_page.get_absolute_url(language, fallback=True), get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
else:
path = '%s?%s' % (referrer, get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
else:
path = '/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
return HttpResponseRedirect(path)
def cleanup_history(self, page, publish=False):
if is_installed('reversion') and page:
# delete revisions that are not publish revisions
from reversion.models import Version
content_type = ContentType.objects.get_for_model(Page)
# reversion 1.8+ removes type field, revision filtering must be based on comments
versions_qs = Version.objects.filter(content_type=content_type, object_id_int=page.pk)
history_limit = get_cms_setting("MAX_PAGE_HISTORY_REVERSIONS")
deleted = []
for version in versions_qs.exclude(revision__comment__in=(INITIAL_COMMENT, PUBLISH_COMMENT)).order_by(
'-revision__pk')[history_limit - 1:]:
if not version.revision_id in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
            # delete all publish revisions beyond MAX_PAGE_PUBLISH_REVERSIONS
publish_limit = get_cms_setting("MAX_PAGE_PUBLISH_REVERSIONS")
if publish_limit and publish:
deleted = []
for version in versions_qs.filter(revision__comment__exact=PUBLISH_COMMENT).order_by(
'-revision__pk')[publish_limit - 1:]:
if not version.revision_id in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
@wrap_transaction
def unpublish(self, request, page_id, language):
"""
        Unpublish a language of a page
"""
site = Site.objects.get_current()
page = get_object_or_404(Page, pk=page_id)
if not page.has_publish_permission(request):
return HttpResponseForbidden(force_unicode(_("You do not have permission to unpublish this page")))
if not page.publisher_public_id:
return HttpResponseForbidden(force_unicode(_("This page was never published")))
try:
page.unpublish(language)
message = _('The %(language)s page "%(page)s" was successfully unpublished') % {
'language': get_language_object(language, site)['name'], 'page': page}
messages.info(request, message)
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(),
action_flag=CHANGE,
change_message=message,
)
except RuntimeError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
except ValidationError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
return HttpResponseRedirect(path)
@wrap_transaction
def revert_page(self, request, page_id, language):
page = get_object_or_404(Page, id=page_id)
        # ensure the user has permission to change (revert) this page
if not page.has_change_permission(request):
return HttpResponseForbidden(force_unicode(_("You do not have permission to change this page")))
page.revert(language)
messages.info(request, _('The page "%s" was successfully reverted.') % page)
if 'node' in request.REQUEST:
# if request comes from tree..
return admin_utils.render_admin_menu_item(request, page)
referer = request.META.get('HTTP_REFERER', '')
path = '../../'
if admin_reverse('index') not in referer:
path = '%s?%s' % (referer.split('?')[0], get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
return HttpResponseRedirect(path)
@create_revision()
def delete_translation(self, request, object_id, extra_context=None):
if 'language' in request.GET:
language = request.GET['language']
else:
language = get_language_from_request(request)
opts = Page._meta
titleopts = Title._meta
app_label = titleopts.app_label
pluginopts = CMSPlugin._meta
try:
obj = self.queryset(request).get(pk=unquote(object_id))
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(force_unicode(_("You do not have permission to change this page")))
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_unicode(opts.verbose_name),
'key': escape(object_id)
})
if not len(list(obj.get_languages())) > 1:
raise Http404(_('There only exists one translation for this page'))
titleobj = get_object_or_404(Title, page__id=object_id, language=language)
saved_plugins = CMSPlugin.objects.filter(placeholder__page__id=object_id, language=language)
using = router.db_for_read(self.model)
kwargs = {
'admin_site': self.admin_site,
'user': request.user,
'using': using
}
deleted_objects, perms_needed = get_deleted_objects(
[titleobj],
titleopts,
**kwargs
)[:2]
to_delete_plugins, perms_needed_plugins = get_deleted_objects(
saved_plugins,
pluginopts,
**kwargs
)[:2]
deleted_objects.append(to_delete_plugins)
perms_needed = set(list(perms_needed) + list(perms_needed_plugins))
if request.method == 'POST':
if perms_needed:
raise PermissionDenied
message = _('Title and plugins with language %(language)s was deleted') % {
'language': force_unicode(get_language_object(language)['name'])
}
self.log_change(request, titleobj, message)
messages.info(request, message)
titleobj.delete()
for p in saved_plugins:
p.delete()
public = obj.publisher_public
if public:
public.save()
if is_installed('reversion'):
self.cleanup_history(obj)
helpers.make_revision_with_plugins(obj, request.user, message)
if not self.has_change_permission(request, None):
return HttpResponseRedirect("../../../../")
return HttpResponseRedirect("../../")
context = {
"title": _("Are you sure?"),
"object_name": force_unicode(titleopts.verbose_name),
"object": titleobj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": admin_reverse('index'),
"app_label": app_label,
}
context.update(extra_context or {})
context_instance = RequestContext(request, current_app=self.admin_site.name)
return render_to_response(self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (app_label, titleopts.object_name.lower()),
"admin/%s/delete_confirmation.html" % app_label,
"admin/delete_confirmation.html"
], context, context_instance=context_instance)
def preview_page(self, request, object_id, language):
"""Redirecting preview function based on draft_id
"""
page = get_object_or_404(Page, id=object_id)
attrs = "?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
attrs += "&language=" + language
with force_language(language):
url = page.get_absolute_url(language) + attrs
site = get_current_site(request)
if not site == page.site:
url = "http%s://%s%s" % ('s' if request.is_secure() else '',
page.site.domain, url)
return HttpResponseRedirect(url)
def change_innavigation(self, request, page_id):
"""
Switch the in_navigation of a page
"""
page = get_object_or_404(Page, pk=page_id)
if page.has_change_permission(request):
page.toggle_in_navigation()
language = request.GET.get('language') or get_language_from_request(request)
return admin_utils.render_admin_menu_item(request, page, language=language)
return HttpResponseForbidden(force_unicode(_("You do not have permission to change this page's in_navigation status")))
def descendants(self, request, page_id, language):
"""
        Get HTML for the descendants of the given page.
        Used for lazy loading pages in cms.changelist.js.
        Permission checks are done in admin_utils.get_admin_menu_item_context,
        which is called by admin_utils.render_admin_menu_item.
"""
page = get_object_or_404(Page, pk=page_id)
return admin_utils.render_admin_menu_item(request, page,
template="admin/cms/page/tree/lazy_menu.html", language=language)
def add_page_type(self, request):
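        # Make sure the hidden 'Page Types' root page and its title exist for
        # the current site, then redirect to the regular add-page view
        # pre-configured to create the new type under that root and copy from
        # the selected page.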
site = Site.objects.get_current()
language = request.GET.get('language') or get_language()
target = request.GET.get('copy_target')
type_root, created = Page.objects.get_or_create(reverse_id=PAGE_TYPES_ID, publisher_is_draft=True, site=site,
defaults={'in_navigation': False})
type_title, created = Title.objects.get_or_create(page=type_root, language=language, slug=PAGE_TYPES_ID,
defaults={'title': _('Page Types')})
url = add_url_parameters(admin_reverse('cms_page_add'), target=type_root.pk, position='first-child',
add_page_type=1, copy_target=target, language=language)
return HttpResponseRedirect(url)
def resolve(self, request):
if not request.user.is_staff:
if DJANGO_1_4:
return HttpResponse('/', mimetype='text/plain')
else:
return HttpResponse('/', content_type='text/plain')
obj = False
url = False
if request.session.get('cms_log_latest', False):
log = LogEntry.objects.get(pk=request.session['cms_log_latest'])
try:
obj = log.get_edited_object()
except (ObjectDoesNotExist, ValueError):
obj = None
del request.session['cms_log_latest']
if obj and obj.__class__ in toolbar_pool.get_watch_models() and hasattr(obj, 'get_absolute_url'):
            # Test whether the object's URL can be retrieved;
            # if it can't, the object is not taken into account.
try:
force_unicode(obj.get_absolute_url())
except:
obj = None
else:
obj = None
if not obj:
pk = request.REQUEST.get('pk')
full_model = request.REQUEST.get('model')
if pk and full_model:
app_label, model = full_model.split('.')
if pk and app_label:
ctype = ContentType.objects.get(app_label=app_label, model=model)
try:
obj = ctype.get_object_for_this_type(pk=pk)
except ctype.model_class().DoesNotExist:
obj = None
try:
force_unicode(obj.get_absolute_url())
except:
obj = None
if obj:
if not request.toolbar or not request.toolbar.edit_mode:
if isinstance(obj, Page):
if obj.get_public_object():
url = obj.get_public_object().get_absolute_url()
else:
url = '%s?%s' % (
obj.get_draft_object().get_absolute_url(),
get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
)
else:
url = obj.get_absolute_url()
else:
url = obj.get_absolute_url()
if url:
return HttpResponse(force_unicode(url), content_type='text/plain')
return HttpResponse('', content_type='text/plain')
def lookup_allowed(self, key, *args, **kwargs):
if key == 'site__exact':
return True
return super(PageAdmin, self).lookup_allowed(key, *args, **kwargs)
def edit_title_fields(self, request, page_id, language):
title = Title.objects.get(page_id=page_id, language=language)
saved_successfully = False
raw_fields = request.GET.get("edit_fields", 'title')
edit_fields = [field for field in raw_fields.split(",") if field in self.title_frontend_editable_fields]
cancel_clicked = request.POST.get("_cancel", False)
opts = Title._meta
if not edit_fields:
# Defaults to title
edit_fields = ('title',)
if not has_generic_permission(title.page.pk, request.user, "change",
title.page.site.pk):
return HttpResponseForbidden(force_unicode(_("You do not have permission to edit this page")))
class PageTitleForm(django.forms.ModelForm):
"""
Dynamic form showing only the fields to be edited
"""
class Meta:
model = Title
fields = edit_fields
if not cancel_clicked and request.method == 'POST':
form = PageTitleForm(instance=title, data=request.POST)
if form.is_valid():
form.save()
saved_successfully = True
else:
form = PageTitleForm(instance=title)
admin_form = AdminForm(form, fieldsets=[(None, {'fields': edit_fields})], prepopulated_fields={},
model_admin=self)
media = self.media + admin_form.media
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'title': 'Title',
'plugin': title.page,
'plugin_id': title.page.id,
'adminform': admin_form,
'add': False,
'is_popup': True,
'media': media,
'opts': opts,
'change': True,
'save_as': False,
'has_add_permission': False,
'window_close_timeout': 10,
}
if cancel_clicked:
# cancel button was clicked
context.update({
'cancel': True,
})
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
if not cancel_clicked and request.method == 'POST' and saved_successfully:
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
return render_to_response('admin/cms/page/plugin/change_form.html', context, RequestContext(request))
def get_published_pagelist(self, *args, **kwargs):
"""
        This view is used by the PageSmartLinkWidget as the user types, to feed the autocomplete drop-down.
"""
request = args[0]
if request.is_ajax():
query_term = request.GET.get('q','').strip('/')
language_code = request.GET.get('language_code', settings.LANGUAGE_CODE)
matching_published_pages = Page.objects.published().public().filter(
Q(title_set__title__icontains=query_term, title_set__language=language_code)
| Q(title_set__path__icontains=query_term, title_set__language=language_code)
| Q(title_set__menu_title__icontains=query_term, title_set__language=language_code)
| Q(title_set__page_title__icontains=query_term, title_set__language=language_code)
).distinct()
results = []
for page in matching_published_pages:
results.append(
{
'path': page.get_path(language=language_code),
'title': page.get_title(language=language_code),
'redirect_url': page.get_absolute_url(language=language_code)
}
)
if DJANGO_1_4:
return HttpResponse(json.dumps(results), mimetype='application/json')
else:
return HttpResponse(json.dumps(results), content_type='application/json')
else:
return HttpResponseForbidden()
def add_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).add_plugin(*args, **kwargs)
def copy_plugins(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).copy_plugins(*args, **kwargs)
def edit_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).edit_plugin(*args, **kwargs)
def move_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).move_plugin(*args, **kwargs)
def delete_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).delete_plugin(*args, **kwargs)
def clear_placeholder(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).clear_placeholder(*args, **kwargs)
admin.site.register(Page, PageAdmin)
|
PYSEC-2017-11
|
cms/tests/admin.py
|
@@ -704,14 +704,14 @@ def test_change_publish_unpublish(self):
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.publish_page(request, page.pk, "en")
- self.assertEqual(response.status_code, 403)
+ self.assertEqual(response.status_code, 405)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
- # Forbidden
self.assertEqual(response.status_code, 403)
+ page = self.reload(page)
self.assertFalse(page.is_published('en'))
admin_user = self.get_admin()
@@ -747,6 +747,10 @@ def test_change_innavigation(self):
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
+ self.assertEqual(response.status_code, 405)
+ with self.login_user_context(permless):
+ request = self.get_request(post_data={'no': 'data'})
+ response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
@@ -806,7 +810,7 @@ def test_revert_page_redirects(self):
admin_user = self.get_admin()
self.page.publish("en") # Ensure public copy exists before reverting
with self.login_user_context(admin_user):
- response = self.client.get(admin_reverse('cms_page_revert_page', args=(self.page.pk, 'en')))
+ response = self.client.post(admin_reverse('cms_page_revert_page', args=(self.page.pk, 'en')))
self.assertEqual(response.status_code, 302)
url = response['Location']
self.assertTrue(url.endswith('?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')))
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import json
import datetime
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.sites import site
from django.contrib.auth.models import Permission, AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import (Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponse,
QueryDict, HttpResponseNotFound)
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.encoding import smart_str
from django.utils import timezone
from django.utils.six.moves.urllib.parse import urlparse
from cms.admin.change_list import CMSChangeList
from cms.admin.forms import PageForm, AdvancedSettingsForm
from cms.admin.pageadmin import PageAdmin
from cms.admin.permissionadmin import PagePermissionInlineAdmin
from cms.api import create_page, create_title, add_plugin, assign_user_to_page, publish_page
from cms.constants import PLUGIN_MOVE_ACTION
from cms.models import UserSettings, StaticPlaceholder
from cms.models.pagemodel import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.test_utils import testcases as base
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE_DELETE, URL_CMS_PAGE, URL_CMS_TRANSLATION_DELETE
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils import get_cms_setting
from cms.utils.compat import DJANGO_1_4, DJANGO_1_6
from cms.utils.compat.dj import get_user_model, force_unicode
class AdminTestsBase(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def _get_guys(self, admin_only=False, use_global_permissions=True):
admin_user = self.get_superuser()
if admin_only:
return admin_user
USERNAME = 'test'
if get_user_model().USERNAME_FIELD == 'email':
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', '[email protected]')
else:
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', USERNAME)
normal_guy.is_staff = True
normal_guy.is_active = True
normal_guy.save()
normal_guy.user_permissions = Permission.objects.filter(
codename__in=['change_page', 'change_title', 'add_page', 'add_title', 'delete_page', 'delete_title']
)
if use_global_permissions:
gpp = GlobalPagePermission.objects.create(
user=normal_guy,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
return admin_user, normal_guy
class AdminTestCase(AdminTestsBase):
def test_extension_not_in_admin(self):
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
request = self.get_request('/admin/cms/page/1/', 'en',)
response = site.index(request)
self.assertNotContains(response, '/mytitleextension/')
self.assertNotContains(response, '/mypageextension/')
def test_permissioned_page_list(self):
"""
Makes sure that a user with restricted page permissions can view
the page list.
"""
admin_user, normal_guy = self._get_guys(use_global_permissions=False)
current_site = Site.objects.get(pk=1)
page = create_page("Test page", "nav_playground.html", "en",
site=current_site, created_by=admin_user)
PagePermission.objects.create(page=page, user=normal_guy)
with self.login_user_context(normal_guy):
resp = self.client.get(URL_CMS_PAGE)
self.assertEqual(resp.status_code, 200)
def test_edit_does_not_reset_page_adv_fields(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, those advanced fields are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
OVERRIDE_URL = 'my/override/url'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.path = OVERRIDE_URL
title.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
# required only if the user has can_change_permission
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
'pagepermission_set-TOTAL_FORMS': 0,  # required only if the user has can_change_permission
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
def test_edit_does_not_reset_apphook(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, the apphook settings are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.save()
page.application_urls = APPLICATION_URLS
page.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0,
}
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
title = page.get_title_obj()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, '')
def test_2apphooks_with_same_namespace(self):
PAGE1 = 'Test Page'
PAGE2 = 'Test page 2'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(PAGE1, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page2 = create_page(PAGE2, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.application_urls = APPLICATION_URLS
page.application_namespace = "space1"
page.save()
page2.application_urls = APPLICATION_URLS
page2.save()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': PAGE2,
'slug': page2.get_slug(),
'language': 'en',
'site': page.site.pk,
'template': page2.template,
'application_urls': 'SampleApp',
'application_namespace': 'space1',
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(Page.objects.filter(application_namespace="space1").count(), 1)
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 200)
page_data['application_namespace'] = 'space2'
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 302)
def test_delete(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
with self.assertNumQueries(FuzzyInt(300, 407)):
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_delete_diff_language(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "de",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
with self.assertNumQueries(FuzzyInt(300, 394)):
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_search_fields(self):
superuser = self.get_superuser()
from django.contrib.admin import site
with self.login_user_context(superuser):
for model, admin_instance in site._registry.items():
if model._meta.app_label != 'cms':
continue
if not admin_instance.search_fields:
continue
url = admin_reverse('cms_%s_changelist' % model._meta.module_name)
response = self.client.get('%s?q=1' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_delete_translation(self):
admin_user = self.get_superuser()
page = create_page("delete-page-translation", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_title("de", "delete-page-translation-2", page, slug="delete-page-translation-2")
create_title("es-mx", "delete-page-translation-es", page, slug="delete-page-translation-es")
with self.login_user_context(admin_user):
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertRedirects(response, URL_CMS_PAGE)
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertRedirects(response, URL_CMS_PAGE)
def test_change_dates(self):
admin_user, staff = self._get_guys()
page = create_page('test-page', 'nav_playground.html', 'en')
page.publish('en')
draft = page.get_draft_object()
with self.settings(USE_TZ=False):
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.now() - datetime.timedelta(days=1)
new_end_date = timezone.now() + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(draft.publication_date.timetuple(), new_date.timetuple())
self.assertEqual(draft.publication_end_date.timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
with self.settings(USE_TZ=True):
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.localtime(timezone.now()) - datetime.timedelta(days=1)
new_end_date = timezone.localtime(timezone.now()) + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_date).timetuple(), new_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_end_date).timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
def test_change_template(self):
admin_user, staff = self._get_guys()
request = self.get_request('/admin/cms/page/1/', 'en')
request.method = "POST"
pageadmin = site._registry[Page]
with self.login_user_context(staff):
self.assertRaises(Http404, pageadmin.change_template, request, 1)
page = create_page('test-page', 'nav_playground.html', 'en')
response = pageadmin.change_template(request, page.pk)
self.assertEqual(response.status_code, 403)
url = admin_reverse('cms_page_change_template', args=(page.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {'template': 'doesntexist'})
self.assertEqual(response.status_code, 400)
response = self.client.post(url, {'template': get_cms_setting('TEMPLATES')[0][0]})
self.assertEqual(response.status_code, 200)
def test_get_permissions(self):
page = create_page('test-page', 'nav_playground.html', 'en')
url = admin_reverse('cms_page_get_permissions', args=(page.pk,))
response = self.client.get(url)
if DJANGO_1_6:
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/login.html')
else:
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/en/admin/login/?next=/en/admin/cms/page/%s/permissions/' % page.pk)
admin_user = self.get_superuser()
with self.login_user_context(admin_user):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateNotUsed(response, 'admin/login.html')
def test_changelist_items(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
self.assertEqual(Page.objects.all().count(), 4)
url = admin_reverse('cms_%s_changelist' % Page._meta.module_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.set_items(request)
root_page = cl.get_items()[0]
self.assertEqual(root_page, first_level_page)
self.assertEqual(root_page.get_children()[0], second_level_page_top)
self.assertEqual(root_page.get_children()[1], second_level_page_bottom)
self.assertEqual(root_page.get_children()[0].get_children()[0], third_level_page)
def test_changelist_get_results(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en', published=True)
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en", # nopyflakes
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en", # nopyflakes
created_by=admin_user, published=True,
parent=second_level_page_top)
fourth_level_page = create_page('level23', "nav_playground.html", "en", # nopyflakes
created_by=admin_user,
parent=self.reload(first_level_page))
self.assertEqual(Page.objects.all().count(), 9)
url = admin_reverse('cms_%s_changelist' % Page._meta.module_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
# full blown page list. only draft pages are taken into account
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 5)
# only one unpublished page is returned
request = self.get_request(url+'?q=level23')
request.session = {}
request.user = admin_user
cl_params[0] = request
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 1)
# a number of pages match the query
request = self.get_request(url+'?q=level2')
request.session = {}
request.user = admin_user
cl_params[0] = request
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 3)
def test_changelist_tree(self):
""" This test checks for proper jstree cookie unquoting.
It should be converted to a selenium test to actually test the jstree behaviour.
Cookie set below is just a forged example (from live session)
"""
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
url = admin_reverse('cms_%s_changelist' % Page._meta.module_name)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
self.client.cookies['djangocms_nodes_open'] = 'page_1%2Cpage_2'
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["open_menu_trees"], [1, 2])
# tests descendants method for the lazy load ajax call
url = "%s%d/en/descendants/" % (url, first_level_page.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# should include both direct descendant pages
self.assertContains(response, 'id="page_%s"' % second_level_page_top.pk)
self.assertContains(response, 'id="page_%s"' % second_level_page_bottom.pk)
# but not any further down the tree
self.assertNotContains(response, 'id="page_%s"' % third_level_page.pk)
self.assertNotContains(response, 'None')
def test_unihandecode_doesnt_break_404_in_admin(self):
self.get_superuser()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
response = self.client.get('/en/admin/cms/page/1/?language=en')
self.assertEqual(response.status_code, 404)
def test_tree_displays_in_correct_language(self):
'''
Test to prove and protect that the page titles in the tree are
displayed in the currently set language.
'''
admin_guy, normal_guy = self._get_guys(use_global_permissions=False)
site = Site.objects.get(pk=1)
en_title = "EN Page"
es_title = "ES Pagina"
# Create a page in en
page = create_page(en_title, "nav_playground.html", "en", site=site, created_by=admin_guy)
# Add a es-mx translation for this page
create_title("es-mx", es_title, page, slug="es_pagina")
url = admin_reverse('cms_%s_changelist' % Page._meta.module_name)
url_pat = '<a href="{0}/{1}/preview/"[^>]*>{2}</a>'
with self.login_user_context(admin_guy):
# Check the EN version of the tree...
response = self.client.get(url, {'language': 'en'})
self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'en', en_title, ))
# Check the ES version of the tree...
response = self.client.get(url, {'language': 'es-mx'})
self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'es-mx', es_title, ))
def test_empty_placeholder_in_correct_language(self):
"""
Test that cleaning a placeholder only affects the current language's contents
"""
# create some objects
page_en = create_page("EmptyPlaceholderTestPage (EN)", "nav_playground.html", "en")
ph = page_en.placeholders.get(slot="body")
# add the text plugin to the en version of the page
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 1")
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 2")
# creating a de title of the page and adding plugins to it
create_title("de", page_en.get_title(), page_en, slug=page_en.get_slug())
add_plugin(ph, "TextPlugin", "de", body="Hello World DE")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 2")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 3")
# before cleaning the de placeholder
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 3)
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
url = '%s?language=de' % admin_reverse('cms_page_clear_placeholder', args=[ph.pk])
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
# After cleaning the de placeholder, en placeholder must still have all the plugins
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 0)
class AdminTests(AdminTestsBase):
# TODO: needs tests for actual permissions, not only superuser/normaluser
def setUp(self):
self.page = create_page("testpage", "nav_playground.html", "en")
def get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_superuser=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_permless(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "permless"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_page(self):
return self.page
def test_change_publish_unpublish(self):
page = self.get_page()
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 403)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
# Forbidden
self.assertEqual(response.status_code, 403)
self.assertFalse(page.is_published('en'))
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertTrue(page.is_published('en'))
response = self.admin_class.unpublish(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
def test_change_status_adds_log_entry(self):
page = self.get_page()
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
self.assertFalse(LogEntry.objects.count())
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(1, LogEntry.objects.count())
self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))
def test_change_innavigation(self):
page = self.get_page()
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
self.assertRaises(Http404, self.admin_class.change_innavigation,
request, page.pk + 100)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
old = page.in_navigation
response = self.admin_class.change_innavigation(request, page.pk)
# These asserts are for #3589
self.assertContains(response, 'lang="en"')
self.assertContains(response, './%s/en/preview/' % page.pk)
self.assertEqual(response.status_code, 200)
page = self.reload(page)
self.assertEqual(old, not page.in_navigation)
def test_publish_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.publish_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 403)
def test_revert_page(self):
self.page.publish('en')
title = self.page.title_set.get(language='en')
title.title = 'new'
title.save()
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
new_title = Title.objects.get(pk=title.pk)
self.assertNotEqual(title.title, new_title.title)
self.assertTrue(title.publisher_is_draft)
self.assertTrue(new_title.publisher_is_draft)
def test_revert_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, 'en')
self.assertEqual(response.status_code, 403)
def test_revert_page_redirects(self):
admin_user = self.get_admin()
self.page.publish("en") # Ensure public copy exists before reverting
with self.login_user_context(admin_user):
response = self.client.get(admin_reverse('cms_page_revert_page', args=(self.page.pk, 'en')))
self.assertEqual(response.status_code, 302)
url = response['Location']
self.assertTrue(url.endswith('?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')))
def test_remove_plugin_requires_post(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request()
response = self.admin_class.delete_plugin(request, plugin.pk)
self.assertEqual(response.status_code, 200)
def test_move_plugin(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
pageplugin = add_plugin(source, 'TextPlugin', 'en', body='test')
plugin_class = pageplugin.get_plugin_class_instance()
expected = {'reload': plugin_class.requires_reload(PLUGIN_MOVE_ACTION)}
placeholder = Placeholder.objects.all()[0]
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 405)
request = self.get_request(post_data={'not_usable': '1'})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'ids': plugin.pk})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': 'invalid-placeholder', 'plugin_language': 'en'})
self.assertRaises(ValueError, self.admin_class.move_plugin, request)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
def test_move_language(self):
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
col = add_plugin(source, 'MultiColumnPlugin', 'en')
sub_col = add_plugin(source, 'ColumnPlugin', 'en', target=col)
col2 = add_plugin(source, 'MultiColumnPlugin', 'de')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': sub_col.pk,
'placeholder_id': source.id, 'plugin_parent': col2.pk, 'plugin_language': 'de'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
sub_col = CMSPlugin.objects.get(pk=sub_col.pk)
self.assertEqual(sub_col.language, "de")
self.assertEqual(sub_col.parent_id, col2.pk)
def test_preview_page(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
self.assertRaises(Http404, self.admin_class.preview_page, request, 404, "en")
page = self.get_page()
page.publish("en")
base_url = page.get_absolute_url()
with self.login_user_context(permless):
request = self.get_request('/?public=true')
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
request = self.get_request()
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
current_site = Site.objects.create(domain='django-cms.org', name='django-cms')
page.site = current_site
page.save()
page.publish("en")
self.assertTrue(page.is_home)
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'],
'http://django-cms.org%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
def test_too_many_plugins_global(self):
conf = {
'body': {
'limits': {
'global': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with SettingsOverride(CMS_PERMISSION=False,
CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_type(self):
conf = {
'body': {
'limits': {
'TextPlugin': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with SettingsOverride(CMS_PERMISSION=False,
CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_edit_title_dirty_bit(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_edit_title_languages(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_page_form_leak(self):
language = "en"
admin_user = self.get_admin()
request = self.get_request('/', 'en')
request.user = admin_user
page = create_page('A', 'nav_playground.html', language, menu_title='menu title')
page_admin = PageAdmin(Page, site)
page_admin._current_page = page
edit_form = page_admin.get_form(request, page)
add_form = page_admin.get_form(request, None)
self.assertEqual(edit_form.base_fields['menu_title'].initial, 'menu title')
self.assertEqual(add_form.base_fields['menu_title'].initial, None)
class NoDBAdminTests(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def test_lookup_allowed_site__exact(self):
self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))
def test_lookup_allowed_published(self):
self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))
class PluginPermissionTests(AdminTestsBase):
def setUp(self):
self._page = create_page('test page', 'nav_playground.html', 'en')
self._placeholder = self._page.placeholders.all()[0]
def _get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_active=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
admin_user = User(**fields)
admin_user.set_password('admin')
admin_user.save()
return admin_user
def _get_page_admin(self):
return admin.site._registry[Page]
def _give_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.add(Permission.objects.get(codename=codename))
def _give_page_permission_rights(self, user):
self._give_permission(user, PagePermission, 'add')
self._give_permission(user, PagePermission, 'change')
self._give_permission(user, PagePermission, 'delete')
def _get_change_page_request(self, user, page):
return type('Request', (object,), {
'user': user,
'path': base.URL_CMS_PAGE_CHANGE % page.pk
})
def _give_cms_permissions(self, user, save=True):
for perm_type in ['add', 'change', 'delete']:
for model in [Page, Title]:
self._give_permission(user, model, perm_type, False)
gpp = GlobalPagePermission.objects.create(
user=user,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
if save:
user.save()
def _create_plugin(self):
plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
return plugin
def test_plugin_add_requires_permissions(self):
"""User tries to add a plugin but has no permissions. He can add the plugin after he got the permissions"""
admin = self._get_admin()
self._give_cms_permissions(admin)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='admin')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
self._give_permission(admin, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_requires_permissions(self):
"""User tries to edit a plugin but has no permissions. He can edit the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_edit_plugin', args=[plugin.id])
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After the permissions are granted, the user can edit the plugin
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_wrong_url(self):
"""User tries to edit a plugin using a random url. 404 response returned"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
self._give_permission(normal_guy, Text, 'change')
url = '%s/edit-plugin/%s/' % (admin_reverse('cms_page_edit_plugin', args=[plugin.id]), plugin.id)
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
self.assertTrue("Plugin not found" in force_unicode(response.content))
def test_plugin_remove_requires_permissions(self):
"""User tries to remove a plugin but has no permissions. He can remove the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_delete_plugin', args=[plugin.pk])
data = dict(plugin_id=plugin.id)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After the permissions are granted, the user can delete the plugin
self._give_permission(normal_guy, Text, 'delete')
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
def test_plugin_move_requires_permissions(self):
"""User tries to move a plugin but has no permissions. He can move the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_move_plugin')
data = dict(plugin_id=plugin.id,
placeholder_id=self._placeholder.pk,
plugin_parent='',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After the permissions are granted, the user can move the plugin
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_requires_permissions(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id=plugin.id,
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After the permissions are granted, the user can copy the plugins
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_placeholder_ref(self):
"""User copies a placeholder into a clipboard. A PlaceholderReferencePlugin is created. Afterwards he copies this
into a placeholder and the PlaceholderReferencePlugin unpacks its content. After that he clear the clipboard"""
self.assertEqual(Placeholder.objects.count(), 2)
self._create_plugin()
self._create_plugin()
admin_user = self.get_superuser()
clipboard = Placeholder()
clipboard.save()
self.assertEqual(CMSPlugin.objects.count(), 2)
settings = UserSettings(language="fr", clipboard=clipboard, user=admin_user)
settings.save()
self.assertEqual(Placeholder.objects.count(), 3)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id='',
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='en',
target_placeholder_id=clipboard.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
clipboard_plugins = clipboard.get_plugins()
self.assertEqual(CMSPlugin.objects.count(), 5)
self.assertEqual(clipboard_plugins.count(), 1)
self.assertEqual(clipboard_plugins[0].plugin_type, "PlaceholderPlugin")
placeholder_plugin, _ = clipboard_plugins[0].get_plugin_instance()
ref_placeholder = placeholder_plugin.placeholder_ref
copied_plugins = ref_placeholder.get_plugins()
self.assertEqual(copied_plugins.count(), 2)
data = dict(source_plugin_id=placeholder_plugin.pk,
source_placeholder_id=clipboard.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
plugins = self._placeholder.get_plugins()
self.assertEqual(plugins.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 7)
self.assertEqual(Placeholder.objects.count(), 4)
url = admin_reverse('cms_page_clear_placeholder', args=[clipboard.pk])
with self.assertNumQueries(FuzzyInt(70, 80)):
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(CMSPlugin.objects.count(), 4)
self.assertEqual(Placeholder.objects.count(), 3)
def test_plugins_copy_language(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD != 'email':
self.client.login(username='test', password='test')
else:
self.client.login(username='[email protected]', password='[email protected]')
self.assertEqual(1, CMSPlugin.objects.all().count())
url = admin_reverse('cms_page_copy_language', args=[self._page.pk])
data = dict(
source_language='en',
target_language='fr',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After the permissions are granted, the user can copy the plugins to another language
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(2, CMSPlugin.objects.all().count())
def test_page_permission_inline_visibility(self):
User = get_user_model()
fields = dict(email='[email protected]', password='user', is_staff=True)
if get_user_model().USERNAME_FIELD != 'email':
fields[get_user_model().USERNAME_FIELD] = 'user'
user = User(**fields)
user.save()
self._give_page_permission_rights(user)
page = create_page('A', 'nav_playground.html', 'en')
page_permission = PagePermission.objects.create(
can_change_permissions=True, user=user, page=page)
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
# user has can_change_permission
# => must see the PagePermissionInline
self.assertTrue(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request,
page if not DJANGO_1_4 else None)))
page = Page.objects.get(pk=page.pk)
# remove can_change_permission
page_permission.can_change_permissions = False
page_permission.save()
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
# => PagePermissionInline is no longer visible
self.assertFalse(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page if not DJANGO_1_4 else None)))
def test_edit_title_is_allowed_for_staff_user(self):
"""
We check here both the permission on a single page, and the global permissions
"""
user = self._create_user('user', is_staff=True)
another_user = self._create_user('another_user', is_staff=True)
page = create_page('A', 'nav_playground.html', 'en')
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
page.pk, 'en'
))
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
assign_user_to_page(page, user, grant_all=True)
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
self._give_cms_permissions(another_user)
username = getattr(another_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_add_returns_valid_pk_for_plugin(self):
admin_user = self._get_admin()
self._give_cms_permissions(admin_user)
self._give_permission(admin_user, Text, 'add')
username = getattr(admin_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(response['content-type'], 'application/json')
pk = response.content.decode('utf8').split("edit-plugin/")[1].split("/")[0]
self.assertTrue(CMSPlugin.objects.filter(pk=int(pk)).exists())
class AdminFormsTests(AdminTestsBase):
def test_clean_overwrite_url(self):
user = AnonymousUser()
user.is_superuser = True
user.pk = 1
request = type('Request', (object,), {'user': user})
with SettingsOverride():
data = {
'title': 'TestPage',
'slug': 'test-page',
'language': 'en',
'overwrite_url': '/overwrite/url/',
'site': Site.objects.get_current().pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'published': True
}
form = PageForm(data)
self.assertTrue(form.is_valid(), form.errors.as_text())
# form.save() does not populate the permission caches or create the Title, so do it manually here.
instance = form.save()
instance.permission_user_cache = user
instance.permission_advanced_settings_cache = True
Title.objects.set_or_create(request, instance, form, 'en')
form = PageForm(data, instance=instance)
self.assertTrue(form.is_valid(), form.errors.as_text())
def test_missmatching_site_parent_dotsite(self):
site0 = Site.objects.create(domain='foo.com', name='foo.com')
site1 = Site.objects.create(domain='foo.com', name='foo.com')
parent_page = Page.objects.create(
template='nav_playground.html',
site=site0)
new_page_data = {
'title': 'Title',
'slug': 'slug',
'language': 'en',
'site': site1.pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': parent_page.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
self.assertIn(u"Site doesn't match the parent's page site",
form.errors['__all__'])
def test_reverse_id_error_location(self):
''' Test moving the reverse_id validation error to a field specific one '''
# this is the Reverse ID we'll re-use to break things.
dupe_id = 'p1'
current_site = Site.objects.get_current()
create_page('Page 1', 'nav_playground.html', 'en', reverse_id=dupe_id)
page2 = create_page('Page 2', 'nav_playground.html', 'en')
# Assemble a bunch of data to test the page form
page2_data = {
'language': 'en',
'site': current_site.pk,
'reverse_id': dupe_id,
'template': 'col_two.html',
}
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertFalse(form.is_valid())
# reverse_id is the only item that is in __all__ as every other field
# has its own clean method. Moving it to be a field error means
# __all__ is now not available.
self.assertNotIn('__all__', form.errors)
# In moving it to its own field, it should be in form.errors, and
# the values contained therein should match these.
self.assertIn('reverse_id', form.errors)
self.assertEqual(1, len(form.errors['reverse_id']))
self.assertEqual([u'A page with this reverse URL id exists already.'],
form.errors['reverse_id'])
page2_data['reverse_id'] = ""
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertTrue(form.is_valid())
admin_user = self._get_guys(admin_only=True)
# reset some of page2_data so we can use cms.api.create_page
page2 = page2.reload()
page2.site = current_site
page2.save()
with self.login_user_context(admin_user):
# re-reset the page2_data for the admin form instance.
page2_data['reverse_id'] = dupe_id
page2_data['site'] = current_site.pk
# post to the admin change form for page 2, and test that the
# reverse_id form row has an errors class. Django's admin avoids
# collapsing these, so that the error is visible.
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page2_data)
self.assertContains(resp, '<div class="form-row errors reverse_id">')
def test_create_page_type(self):
page = create_page('Test', 'static.html', 'en', published=True, reverse_id="home")
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
page.publish('en')
self.assertEqual(Page.objects.count(), 2)
self.assertEqual(CMSPlugin.objects.count(), 4)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(
"%s?copy_target=%s&language=%s" % (admin_reverse("cms_page_add_page_type"), page.pk, 'en'))
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 3)
self.assertEqual(Page.objects.filter(reverse_id="page_types").count(), 1)
page_types = Page.objects.get(reverse_id='page_types')
url = response.url if hasattr(response, 'url') else response['Location']
expected_url_params = QueryDict(
'target=%s&position=first-child&add_page_type=1&copy_target=%s&language=en' % (page_types.pk, page.pk))
response_url_params = QueryDict(urlparse(url).query)
self.assertDictEqual(expected_url_params, response_url_params)
response = self.client.get("%s?copy_target=%s&language=%s" % (
admin_reverse("cms_page_add_page_type"), page.pk, 'en'), follow=True)
self.assertEqual(response.status_code, 200)
# test no page types if no page types there
response = self.client.get(admin_reverse('cms_page_add'))
self.assertNotContains(response, "page_type")
# create our first page type
page_data = {
'title': 'type1', 'slug': 'type1', '_save': 1, 'template': 'static.html', 'site': 1,
'language': 'en'
}
response = self.client.post(
"/en/admin/cms/page/add/?target=%s&position=first-child&add_page_type=1©_target=%s&language=en" % (
page_types.pk, page.pk), data=page_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 6)
response = self.client.get(admin_reverse('cms_page_add'))
self.assertContains(response, "page_type")
# no page types available if you use the copy_target
response = self.client.get("%s?copy_target=%s&language=en" % (admin_reverse('cms_page_add'), page.pk))
self.assertNotContains(response, "page_type")
def test_render_edit_mode(self):
from django.core.cache import cache
cache.clear()
create_page('Test', 'static.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
self.assertEqual(Placeholder.objects.all().count(), 4)
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(40, 66)):
output = force_unicode(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
self.assertIn('<b>Test</b>', output)
self.assertEqual(Placeholder.objects.all().count(), 9)
self.assertEqual(StaticPlaceholder.objects.count(), 2)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
with self.assertNumQueries(FuzzyInt(40, 60)):
output = force_unicode(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
self.assertIn('<b>Test</b>', output)
with self.assertNumQueries(FuzzyInt(18, 48)):
force_unicode(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
with self.assertNumQueries(FuzzyInt(12, 30)):
force_unicode(self.client.get('/en/').content)
def test_tree_view_queries(self):
from django.core.cache import cache
cache.clear()
for i in range(10):
create_page('Test%s' % i, 'col_two.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(18, 33)):
force_unicode(self.client.get('/en/admin/cms/page/'))
def test_smart_link_published_pages(self):
admin, staff_guy = self._get_guys()
page_url = '/en/admin/cms/page/published-pages/' # Not sure how to achieve this with reverse...
with self.login_user_context(staff_guy):
multi_title_page = create_page('main_title', 'col_two.html', 'en', published=True,
overwrite_url='overwritten_url',
menu_title='menu_title')
title = multi_title_page.get_title_obj()
title.page_title = 'page_title'
title.save()
multi_title_page.save()
publish_page(multi_title_page, admin, 'en')
# A non-AJAX call should return a 403, as this page should only be accessed via AJAX queries
self.assertEqual(403, self.client.get(page_url).status_code)
self.assertEqual(200,
self.client.get(page_url, HTTP_X_REQUESTED_WITH='XMLHttpRequest').status_code
)
# Test that the query param is working as expected.
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'main_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'menu_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'overwritten_url'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'page_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
class AdminPageEditContentSizeTests(AdminTestsBase):
"""
System user count influences the size of the page edit page,
but each user should appear only twice on that page.
The test relates to extra=0
at PagePermissionInlineAdminForm and ViewRestrictionInlineAdmin.
"""
def test_editpage_contentsize(self):
"""
Expect the username to appear only twice in the content, while the
page size still grows with the user count.
"""
with SettingsOverride(CMS_PERMISSION=True):
admin_user = self.get_superuser()
PAGE_NAME = 'TestPage'
USER_NAME = 'test_size_user_0'
current_site = Site.objects.get(pk=1)
page = create_page(PAGE_NAME, "nav_playground.html", "en", site=current_site, created_by=admin_user)
page.save()
self._page = page
with self.login_user_context(admin_user):
url = base.URL_CMS_PAGE_PERMISSION_CHANGE % self._page.pk
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
old_response_size = len(response.content)
old_user_count = get_user_model().objects.count()
# create an additional user and reload the page
get_user_model().objects.create_user(username=USER_NAME, email=USER_NAME + '@django-cms.org',
password=USER_NAME)
user_count = get_user_model().objects.count()
more_users_in_db = old_user_count < user_count
# we have more users
self.assertTrue(more_users_in_db, "New user was NOT created")
response = self.client.get(url)
new_response_size = len(response.content)
page_size_grown = old_response_size < new_response_size
# expect that the page size is influenced by the number of users in the system
self.assertTrue(page_size_grown, "Page size has not grown after user creation")
# usernames appear only 2 times in the content
text = smart_str(response.content, response._charset)
foundcount = text.count(USER_NAME)
# 2 forms contain usernames as options
self.assertEqual(foundcount, 2,
"Username %s appeared %s times in response.content, expected 2 times" % (
USER_NAME, foundcount))
|
PYSEC-2017-11
|
cms/tests/publisher.py
|
@@ -342,7 +342,7 @@ def test_publish_home(self):
self.assertEqual(Page.objects.all().count(), 1)
superuser = self.get_superuser()
with self.login_user_context(superuser):
- response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
+ response = self.client.post(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], "http://testserver/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
@@ -381,7 +381,7 @@ def test_publish_admin(self):
page = self.create_page("test_admin", published=False)
superuser = self.get_superuser()
with self.login_user_context(superuser):
- response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
+ response = self.client.post(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
@@ -396,7 +396,7 @@ def test_publish_wrong_lang(self):
):
with self.login_user_context(superuser):
with force_language('de'):
- response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
+ response = self.client.post(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from django.contrib.sites.models import Site
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.models import Text
from django.core.cache import cache
from django.core.management.base import CommandError
from django.core.management import call_command
from django.core.urlresolvers import reverse
from cms.api import create_page, add_plugin, create_title
from cms.constants import PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_DIRTY
from cms.management.commands import publisher_publish
from cms.models import CMSPlugin, Title
from cms.models.pagemodel import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import SettingsOverrideTestCase as TestCase
from cms.test_utils.util.context_managers import StdoutOverride, SettingsOverride
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import force_language
from cms.utils.compat.dj import get_user_model
class PublisherCommandTests(TestCase):
"""
Tests for the publish command
"""
def test_command_line_should_raise_without_superuser(self):
with self.assertRaises(CommandError):
com = publisher_publish.Command()
com.handle_noargs()
def test_command_line_publishes_zero_pages_on_empty_db(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 0)
self.assertEqual(published_from_output, 0)
def test_command_line_ignores_draft_page(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
create_page("The page!", "nav_playground.html", "en", published=False)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 0)
self.assertEqual(published_from_output, 0)
self.assertEqual(Page.objects.public().count(), 0)
def test_command_line_publishes_draft_page(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
create_page("The page!", "nav_playground.html", "en", published=False)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish', include_unpublished=True)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
def test_command_line_publishes_selected_language(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
page = create_page("en title", "nav_playground.html", "en")
title = create_title('de', 'de title', page)
title.published = True
title.save()
title = create_title('fr', 'fr title', page)
title.published = True
title.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish', language='de')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de'])
def test_command_line_publishes_selected_language_drafts(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
page = create_page("en title", "nav_playground.html", "en")
title = create_title('de', 'de title', page)
title.published = False
title.save()
title = create_title('fr', 'fr title', page)
title.published = False
title.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish', language='de', include_unpublished=True)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de'])
def test_table_name_patching(self):
"""
This tests the patching of plugin model table names when publishing from the command line
"""
User = get_user_model()
User.objects.create_superuser('djangocms', '[email protected]', '123456')
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
draft.publish('en')
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
# Manually undoing table name patching
Text._meta.db_table = 'djangocms_text_ckeditor_text'
plugin_pool.patched = False
with StdoutOverride():
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
def test_command_line_publishes_one_page(self):
"""
Publisher always creates two Page objects for every CMS page,
one is_draft and one is_public.
The public version of the page can be either published or not.
This bit of code uses manager methods in some places and manual
filters in others on purpose (this helps test the managers)
"""
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
# Now, let's create a page. That actually creates 2 Page objects
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
# Sanity check the database (we should have one draft and one public)
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
# Now check that the non-draft has the attribute we set to the draft.
non_draft = Page.objects.public()[0]
self.assertEqual(non_draft.reverse_id, 'a_test')
def test_command_line_publish_multiple_languages(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
# Create a draft page with two published titles
page = create_page(u"The page!", "nav_playground.html", "en", published=False)
title = create_title('de', 'ja', page)
title.published = True
title.save()
title = create_title('fr', 'non', page)
title.published = True
title.save()
with StdoutOverride():
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de', 'fr'])
def test_command_line_publish_one_site(self):
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
siteA = Site.objects.create(domain='a.example.com', name='a.example.com')
siteB = Site.objects.create(domain='b.example.com', name='b.example.com')
#example.com
create_page(u"example.com homepage", "nav_playground.html", "en", published=True)
#a.example.com
create_page(u"a.example.com homepage", "nav_playground.html", "de", site=siteA, published=True)
#b.example.com
create_page(u"b.example.com homepage", "nav_playground.html", "de", site=siteB, published=True)
create_page(u"b.example.com about", "nav_playground.html", "nl", site=siteB, published=True)
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish', site=siteB.id)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 2)
self.assertEqual(published_from_output, 2)
def test_command_line_publish_multiple_languages_check_count(self):
"""
Publishing one page with multiple languages still counts
as one page. This test case checks whether it works
as expected.
"""
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
# Now, let's create a page with 2 languages.
page = create_page("en title", "nav_playground.html", "en", published=True)
create_title("de", "de title", page)
page.publish("de")
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
def tearDown(self):
plugin_pool.patched = False
plugin_pool.set_plugin_meta()
class PublishingTests(TestCase):
def create_page(self, title=None, **kwargs):
return create_page(title or self._testMethodName,
"nav_playground.html", "en", **kwargs)
def test_publish_home(self):
name = self._testMethodName
page = self.create_page(name, published=False)
self.assertFalse(page.publisher_public_id)
self.assertEqual(Page.objects.all().count(), 1)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], "http://testserver/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
def test_publish_single(self):
name = self._testMethodName
page = self.create_page(name, published=False)
self.assertFalse(page.is_published('en'))
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published("en")
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
page.publish("en")
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published("en")
self.assertTrue(page.is_published('en'))
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DEFAULT)
self.assertIsNotNone(page.publisher_public)
self.assertTrue(page.publisher_public_id)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(public, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_publisher_state("en"), 0)
def test_publish_admin(self):
page = self.create_page("test_admin", published=False)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_publisher_state('en'), 0)
def test_publish_wrong_lang(self):
page = self.create_page("test_admin", published=False)
superuser = self.get_superuser()
with SettingsOverride(
LANGUAGES=(('de', 'de'), ('en', 'en')),
CMS_LANGUAGES={1: [{'code': 'en', 'name': 'en', 'fallbacks': ['fr', 'de'], 'public': True}]}
):
with self.login_user_context(superuser):
with force_language('de'):
response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
def test_publish_child_first(self):
parent = self.create_page('parent', published=False)
child = self.create_page('child', published=False, parent=parent)
parent = parent.reload()
self.assertFalse(parent.is_published('en'))
self.assertFalse(child.is_published('en'))
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published('en')
for name in ('parent', 'child'):
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
child.publish("en")
child = child.reload()
self.assertTrue(child.is_published("en"))
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
self.assertIsNone(child.publisher_public)
# Since the parent is not published yet, the state is otherwise unchanged
for name in ('parent', 'child'):
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
parent.publish("en")
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published('en')
# Cascade publish for all pending descendants
for name in ('parent', 'child'):
self.assertObjectExist(drafts, title_set__title=name)
page = drafts.get(title_set__title=name)
self.assertTrue(page.is_published("en"), name)
self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT, name)
self.assertIsNotNone(page.publisher_public, name)
self.assertTrue(page.publisher_public.is_published('en'), name)
self.assertObjectExist(public, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
def test_simple_publisher(self):
"""
Creates the stuff needed for these tests.
Please keep this up-to-date (the docstring!)
A
/ \
B C
"""
# Create a simple tree of 3 pages
pageA = create_page("Page A", "nav_playground.html", "en",
published=True)
pageB = create_page("Page B", "nav_playground.html", "en", parent=pageA,
published=True)
pageC = create_page("Page C", "nav_playground.html", "en", parent=pageA,
published=False)
# Assert A and B are published, C unpublished
self.assertTrue(pageA.publisher_public_id)
self.assertTrue(pageB.publisher_public_id)
self.assertTrue(not pageC.publisher_public_id)
self.assertEqual(len(Page.objects.public().published("en")), 2)
# Let's publish C now.
pageC.publish("en")
# Assert all are published
self.assertTrue(pageA.publisher_public_id)
self.assertTrue(pageB.publisher_public_id)
self.assertTrue(pageC.publisher_public_id)
self.assertEqual(len(Page.objects.public().published("en")), 3)
def test_i18n_publishing(self):
page = self.create_page('parent', published=True)
self.assertEqual(Title.objects.all().count(), 2)
create_title("de", "vater", page)
self.assertEqual(Title.objects.all().count(), 3)
self.assertEqual(Title.objects.filter(published=True).count(), 2)
page.publish('de')
self.assertEqual(Title.objects.all().count(), 4)
self.assertEqual(Title.objects.filter(published=True).count(), 4)
def test_publish_ordering(self):
page = self.create_page('parent', published=True)
pageA = self.create_page('pageA', parent=page, published=True)
pageC = self.create_page('pageC', parent=page, published=True)
pageB = self.create_page('pageB', parent=page, published=True)
page = page.reload()
pageB.move_page(pageA, 'right')
pageB.publish("en")
# pageC needs reload since B has swapped places with it
pageC.reload().publish("en")
pageA.publish('en')
drafts = Page.objects.drafts().order_by('tree_id', 'lft')
draft_titles = [(p.get_title('en'), p.lft, p.rght) for p in drafts]
self.assertEqual([('parent', 1, 8),
('pageA', 2, 3),
('pageB', 4, 5),
('pageC', 6, 7)], draft_titles)
public = Page.objects.public().order_by('tree_id', 'lft')
public_titles = [(p.get_title('en'), p.lft, p.rght) for p in public]
self.assertEqual([('parent', 1, 8),
('pageA', 2, 3),
('pageB', 4, 5),
('pageC', 6, 7)], public_titles)
page.publish('en')
drafts = Page.objects.drafts().order_by('tree_id', 'lft')
draft_titles = [(p.get_title('en'), p.lft, p.rght) for p in drafts]
self.assertEqual([('parent', 1, 8),
('pageA', 2, 3),
('pageB', 4, 5),
('pageC', 6, 7)], draft_titles)
public = Page.objects.public().order_by('tree_id', 'lft')
public_titles = [(p.get_title('en'), p.lft, p.rght) for p in public]
self.assertEqual([('parent', 1, 8),
('pageA', 2, 3),
('pageB', 4, 5),
('pageC', 6, 7)], public_titles)
def test_publish_ordering2(self):
page = self.create_page('parent', published=False)
pageA = self.create_page('pageA', published=False)
pageC = self.create_page('pageC', published=False, parent=pageA)
pageB = self.create_page('pageB', published=False, parent=pageA)
page = page.reload()
pageA.publish('en')
pageB.publish('en')
pageC.publish('en')
page.publish('en')
drafts = Page.objects.filter(publisher_is_draft=True).order_by('tree_id', 'lft')
publics = Page.objects.filter(publisher_is_draft=False).order_by('tree_id', 'lft')
x = 0
for draft in drafts:
self.assertEqual(draft.publisher_public_id, publics[x].pk)
x += 1
def test_unpublish_unpublish(self):
name = self._testMethodName
page = self.create_page(name, published=True)
drafts = Page.objects.drafts()
published = Page.objects.public().published("en")
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
page.unpublish('en')
self.assertFalse(page.is_published('en'))
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
page.publish('en')
self.assertTrue(page.publisher_public_id)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
def test_delete_title_unpublish(self):
page = self.create_page('test', published=True)
sub_page = self.create_page('test2', published=True, parent=page)
self.assertTrue(sub_page.publisher_public.is_published('en'))
page.title_set.all().delete()
self.assertFalse(sub_page.publisher_public.is_published('en', force_reload=True))
def test_modify_child_while_pending(self):
home = self.create_page("Home", published=True, in_navigation=True)
child = self.create_page("Child", published=True, parent=home,
in_navigation=False)
home = home.reload()
home.unpublish('en')
self.assertEqual(Title.objects.count(), 4)
child = child.reload()
self.assertFalse(child.publisher_public.is_published('en'))
self.assertFalse(child.in_navigation)
self.assertFalse(child.publisher_public.in_navigation)
child.in_navigation = True
child.save()
child.publish('en')
child = self.reload(child)
self.assertEqual(Title.objects.count(), 4)
self.assertTrue(child.is_published('en'))
self.assertFalse(child.publisher_public.is_published('en'))
self.assertTrue(child.in_navigation)
self.assertTrue(child.publisher_public.in_navigation)
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
home.publish('en')
child = self.reload(child)
self.assertTrue(child.is_published('en'))
self.assertTrue(child.publisher_public_id)
self.assertTrue(child.publisher_public.in_navigation)
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
def test_republish_with_descendants(self):
home = self.create_page("Home", published=True)
child = self.create_page("Child", published=True, parent=home)
gc = self.create_page("GC", published=True, parent=child)
self.assertTrue(child.is_published("en"))
self.assertTrue(gc.is_published('en'))
home = home.reload()
home.unpublish('en')
child = self.reload(child)
gc = self.reload(gc)
self.assertTrue(child.is_published("en"))
self.assertTrue(gc.is_published("en"))
self.assertFalse(child.publisher_public.is_published("en"))
self.assertFalse(gc.publisher_public.is_published('en'))
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
self.assertEqual(gc.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
home.publish('en')
child = self.reload(child)
gc = self.reload(gc)
self.assertTrue(child.publisher_public_id)
self.assertTrue(gc.is_published('en'))
self.assertTrue(child.is_published('en'))
self.assertTrue(gc.publisher_public_id)
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
self.assertEqual(gc.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
def test_republish_with_dirty_children(self):
home = self.create_page("Home", published=True)
dirty1 = self.create_page("Dirty1", published=True, parent=home)
dirty2 = self.create_page("Dirty2", published=True, parent=home)
home = self.reload(home)
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
dirty1.in_navigation = True
dirty1.save()
home.unpublish('en')
dirty2.in_navigation = True
dirty2.save()
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
self.assertTrue(dirty1.is_published)
self.assertTrue(dirty2.publisher_public_id)
self.assertEqual(dirty1.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertEqual(dirty2.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
home = self.reload(home)
with self.assertNumQueries(FuzzyInt(0, 100)):
home.publish('en')
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
self.assertTrue(dirty1.is_published("en"))
self.assertTrue(dirty2.is_published("en"))
self.assertTrue(dirty1.publisher_public.is_published("en"))
self.assertTrue(dirty2.publisher_public.is_published("en"))
self.assertEqual(dirty1.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertEqual(dirty2.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
def test_republish_with_unpublished_child(self):
"""
Unpub1 was never published, and unpub2 has been unpublished after the
fact. None of the grandchildren should become published.
"""
home = self.create_page("Home", published=True)
unpub1 = self.create_page("Unpub1", published=False, parent=home)
unpub2 = self.create_page("Unpub2", published=True, parent=home)
gc1 = self.create_page("GC1", published=True, parent=unpub1)
gc2 = self.create_page("GC2", published=True, parent=unpub2)
self.assertFalse(gc1.publisher_public_id)
self.assertFalse(gc1.publisher_public_id)
self.assertTrue(gc1.is_published('en'))
self.assertTrue(gc2.is_published('en'))
home.unpublish('en')
unpub1 = self.reload(unpub1)
unpub2.unpublish('en') # Just marks this as not published
for page in (unpub1, unpub2):
self.assertFalse(page.is_published('en'), page)
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertIsNone(unpub1.publisher_public)
self.assertIsNotNone(unpub2.publisher_public)
self.assertFalse(unpub2.publisher_public.is_published('en'))
gc1 = self.reload(gc1)
gc2 = self.reload(gc2)
for page in (gc1, gc2):
self.assertTrue(page.is_published('en'))
self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
self.assertIsNone(gc1.publisher_public)
self.assertIsNotNone(gc2.publisher_public)
self.assertFalse(gc2.publisher_public.is_published('en'))
def test_unpublish_with_descendants(self):
page = self.create_page("Page", published=True)
child = self.create_page("Child", parent=page, published=True)
self.create_page("Grandchild", parent=child, published=True)
page = page.reload()
child.reload()
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published("en")
self.assertEqual(published.count(), 3)
self.assertEqual(page.get_descendant_count(), 2)
base = reverse('pages-root')
for url in (base, base + 'child/', base + 'child/grandchild/'):
response = self.client.get(url)
self.assertEqual(response.status_code, 200, url)
for title in ('Page', 'Child', 'Grandchild'):
self.assertObjectExist(drafts, title_set__title=title)
self.assertObjectExist(public, title_set__title=title)
self.assertObjectExist(published, title_set__title=title)
item = drafts.get(title_set__title=title)
self.assertTrue(item.publisher_public_id)
self.assertEqual(item.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
self.assertTrue(page.unpublish('en'), 'Unpublish was not successful')
self.assertFalse(page.is_published('en'))
cache.clear()
for url in (base, base + 'child/', base + 'child/grandchild/'):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
for title in ('Page', 'Child', 'Grandchild'):
self.assertObjectExist(drafts, title_set__title=title)
self.assertObjectExist(public, title_set__title=title)
self.assertObjectDoesNotExist(published, title_set__title=title)
item = drafts.get(title_set__title=title)
if title == 'Page':
self.assertFalse(item.is_published("en"))
self.assertFalse(item.publisher_public.is_published("en"))
# Not sure what the proper state of these is after unpublish
#self.assertEqual(page.publisher_state, PUBLISHER_STATE_DEFAULT)
self.assertTrue(page.is_dirty('en'))
else:
# The changes to the published subpages are simply that the
# published flag of the PUBLIC instance goes to false, and the
# publisher state is set to mark waiting for parent
self.assertTrue(item.is_published('en'), title)
self.assertFalse(item.publisher_public.is_published('en'), title)
self.assertEqual(item.get_publisher_state('en'), PUBLISHER_STATE_PENDING,
title)
self.assertTrue(item.is_dirty('en'), title)
def test_unpublish_with_dirty_descendants(self):
page = self.create_page("Page", published=True)
child = self.create_page("Child", parent=page, published=True)
gchild = self.create_page("Grandchild", parent=child, published=True)
child.in_navigation = True
child.save()
self.assertTrue(child.is_dirty("en"))
self.assertFalse(gchild.is_dirty('en'))
self.assertTrue(child.publisher_public.is_published('en'))
self.assertTrue(gchild.publisher_public.is_published('en'))
page.unpublish('en')
child = self.reload(child)
gchild = self.reload(gchild)
# Descendants become dirty after unpublish
self.assertTrue(child.is_dirty('en'))
self.assertTrue(gchild.is_dirty('en'))
# However, their public version is still removed no matter what
self.assertFalse(child.publisher_public.is_published('en'))
self.assertFalse(gchild.publisher_public.is_published('en'))
def test_prepublish_descendants(self):
page = self.create_page("Page", published=True)
child = self.create_page("Child", parent=page, published=False)
gchild2 = self.create_page("Grandchild2", parent=child, published=False)
self.create_page("Grandchild3", parent=child, published=False)
gchild = self.create_page("Grandchild", published=True)
gchild.move_page(target=child, position='last-child')
gchild.publish('en')
self.assertFalse(child.is_published('en'))
self.assertTrue(gchild.is_published('en'))
self.assertEqual(gchild.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
child = child.reload()
child.publish('en')
gchild2 = gchild2.reload()
gchild2.publish('en')
self.assertTrue(child.is_published("en"))
self.assertTrue(gchild.is_published("en"))
self.assertEqual(gchild.get_publisher_state('en', force_reload=True), PUBLISHER_STATE_DEFAULT)
gchild = gchild.reload()
gchild2 = gchild2.reload()
self.assertEqual(gchild.lft, gchild.publisher_public.lft)
self.assertEqual(gchild.rght, gchild.publisher_public.rght)
def test_republish_multiple_root(self):
# TODO: The paths do not match expected behaviour
home = self.create_page("Page", published=True)
other = self.create_page("Another Page", published=True)
child = self.create_page("Child", published=True, parent=home)
child2 = self.create_page("Child", published=True, parent=other)
self.assertTrue(Page.objects.filter(is_home=True).count(), 2)
self.assertTrue(home.is_home)
home = home.reload()
self.assertTrue(home.publisher_public.is_home)
root = reverse('pages-root')
self.assertEqual(home.get_absolute_url(), root)
self.assertEqual(home.get_public_object().get_absolute_url(), root)
self.assertEqual(child.get_absolute_url(), root + 'child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'child/')
self.assertEqual(other.get_absolute_url(), root + 'another-page/')
self.assertEqual(other.get_public_object().get_absolute_url(), root + 'another-page/')
self.assertEqual(child2.get_absolute_url(), root + 'another-page/child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'another-page/child/')
home = self.reload(home)
home.unpublish('en')
home = self.reload(home)
other = self.reload(other)
child = self.reload(child)
child2 = self.reload(child2)
self.assertFalse(home.is_home)
self.assertFalse(home.publisher_public.is_home)
self.assertTrue(other.is_home)
self.assertTrue(other.publisher_public.is_home)
self.assertEqual(other.get_absolute_url(), root)
self.assertEqual(other.get_public_object().get_absolute_url(), root)
self.assertEqual(home.get_absolute_url(), root + 'page/')
self.assertEqual(home.get_public_object().get_absolute_url(), root + 'page/')
self.assertEqual(child.get_absolute_url(), root + 'page/child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'page/child/')
self.assertEqual(child2.get_absolute_url(), root + 'child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'child/')
home.publish('en')
home = self.reload(home)
other = self.reload(other)
child = self.reload(child)
child2 = self.reload(child2)
self.assertTrue(home.is_home)
self.assertTrue(home.publisher_public.is_home)
self.assertEqual(home.get_absolute_url(), root)
self.assertEqual(home.get_public_object().get_absolute_url(), root)
self.assertEqual(child.get_absolute_url(), root + 'child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'child/')
self.assertEqual(other.get_absolute_url(), root + 'another-page/')
self.assertEqual(other.get_public_object().get_absolute_url(), root + 'another-page/')
self.assertEqual(child2.get_absolute_url(), root + 'another-page/child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'another-page/child/')
def test_revert_contents(self):
user = self.get_superuser()
page = create_page("Page", "nav_playground.html", "en", published=True,
created_by=user)
placeholder = page.placeholders.get(slot=u"body")
deleted_plugin = add_plugin(placeholder, u"TextPlugin", u"en", body="Deleted content")
text_plugin = add_plugin(placeholder, u"TextPlugin", u"en", body="Public content")
page.publish('en')
# Modify and delete plugins
text_plugin.body = "<p>Draft content</p>"
text_plugin.save()
deleted_plugin.delete()
self.assertEqual(CMSPlugin.objects.count(), 3)
# Now let's revert and restore
page.revert('en')
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DEFAULT)
self.assertEqual(CMSPlugin.objects.count(), 4)
plugins = CMSPlugin.objects.filter(placeholder__page=page)
self.assertEqual(plugins.count(), 2)
plugins = [plugin.get_plugin_instance()[0] for plugin in plugins]
self.assertEqual(plugins[0].body, "Deleted content")
self.assertEqual(plugins[1].body, "Public content")
def test_revert_move(self):
parent = create_page("Parent", "nav_playground.html", "en", published=True)
parent_url = parent.get_absolute_url()
page = create_page("Page", "nav_playground.html", "en", published=True,
parent=parent)
other = create_page("Other", "nav_playground.html", "en", published=True)
other_url = other.get_absolute_url()
child = create_page("Child", "nav_playground.html", "en", published=True,
parent=page)
parent = parent.reload()
page = page.reload()
self.assertEqual(page.get_absolute_url(), parent_url + "page/")
self.assertEqual(child.get_absolute_url(), parent_url + "page/child/")
# Now let's move it (and the child)
page.move_page(other)
page = self.reload(page)
child = self.reload(child)
self.assertEqual(page.get_absolute_url(), other_url + "page/")
self.assertEqual(child.get_absolute_url(), other_url + "page/child/")
# Public version changed the url as well
self.assertEqual(page.publisher_public.get_absolute_url(), other_url + "page/")
self.assertEqual(child.publisher_public.get_absolute_url(), other_url + "page/child/")
def test_publish_works_with_descendants(self):
"""
For help understanding what this tests for, see:
http://articles.sitepoint.com/print/hierarchical-data-database
Creates this published structure:
home
/ \
item1 item2
/ \
subitem1 subitem2
"""
home_page = create_page("home", "nav_playground.html", "en",
published=True, in_navigation=False)
create_page("item1", "nav_playground.html", "en", parent=home_page,
published=True)
item2 = create_page("item2", "nav_playground.html", "en", parent=home_page,
published=True)
create_page("subitem1", "nav_playground.html", "en", parent=item2,
published=True)
create_page("subitem2", "nav_playground.html", "en", parent=item2,
published=True)
item2 = item2.reload()
not_drafts = list(Page.objects.filter(publisher_is_draft=False).order_by('lft'))
drafts = list(Page.objects.filter(publisher_is_draft=True).order_by('lft'))
self.assertEqual(len(not_drafts), 5)
self.assertEqual(len(drafts), 5)
for idx, draft in enumerate(drafts):
public = not_drafts[idx]
# Check that a node doesn't become a root node magically
self.assertEqual(bool(public.parent_id), bool(draft.parent_id))
if public.parent:
# Let's assert the MPTT tree is consistent
self.assertTrue(public.lft > public.parent.lft)
self.assertTrue(public.rght < public.parent.rght)
self.assertEqual(public.tree_id, public.parent.tree_id)
self.assertTrue(public.parent in public.get_ancestors())
self.assertTrue(public in public.parent.get_descendants())
self.assertTrue(public in public.parent.get_children())
if draft.parent:
# Same principle for the draft tree
self.assertTrue(draft.lft > draft.parent.lft)
self.assertTrue(draft.rght < draft.parent.rght)
self.assertEqual(draft.tree_id, draft.parent.tree_id)
self.assertTrue(draft.parent in draft.get_ancestors())
self.assertTrue(draft in draft.parent.get_descendants())
self.assertTrue(draft in draft.parent.get_children())
# Now call publish again. The structure should not change.
item2.publish('en')
not_drafts = list(Page.objects.filter(publisher_is_draft=False).order_by('lft'))
drafts = list(Page.objects.filter(publisher_is_draft=True).order_by('lft'))
self.assertEqual(len(not_drafts), 5)
self.assertEqual(len(drafts), 5)
for idx, draft in enumerate(drafts):
public = not_drafts[idx]
# Check that a node doesn't become a root node magically
self.assertEqual(bool(public.parent_id), bool(draft.parent_id))
if public.parent:
# Let's assert the MPTT tree is consistent
self.assertTrue(public.lft > public.parent.lft)
self.assertTrue(public.rght < public.parent.rght)
self.assertEqual(public.tree_id, public.parent.tree_id)
self.assertTrue(public.parent in public.get_ancestors())
self.assertTrue(public in public.parent.get_descendants())
self.assertTrue(public in public.parent.get_children())
if draft.parent:
# Same principle for the draft tree
self.assertTrue(draft.lft > draft.parent.lft)
self.assertTrue(draft.rght < draft.parent.rght)
self.assertEqual(draft.tree_id, draft.parent.tree_id)
self.assertTrue(draft.parent in draft.get_ancestors())
self.assertTrue(draft in draft.parent.get_descendants())
self.assertTrue(draft in draft.parent.get_children())
|
PYSEC-2017-11
|
cms/tests/reversion_tests.py
|
@@ -246,7 +246,7 @@ def test_publish_limits(self):
self.assertEqual(Revision.objects.all().count(), 5)
for x in range(10):
publish_url = URL_CMS_PAGE + "%s/en/publish/" % page_pk
- response = self.client.get(publish_url)
+ response = self.client.post(publish_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Revision.objects.all().count(), 4)
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import shutil
from os.path import join
from cms.utils.conf import get_cms_setting
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.models import Text
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.files.uploadedfile import SimpleUploadedFile
import reversion
from reversion.models import Revision, Version
from cms.models import Page, Title, Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.project.fileapp.models import FileModel
from cms.test_utils.testcases import CMSTestCase, TransactionCMSTestCase, URL_CMS_PAGE, URL_CMS_PAGE_CHANGE, URL_CMS_PAGE_ADD, \
URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT
from cms.test_utils.util.context_managers import SettingsOverride
if hasattr(reversion.models, 'VERSION_CHANGE'):
from reversion.models import VERSION_CHANGE
class BasicReversionTestCase(CMSTestCase):
def setUp(self):
self.user = self._create_user("test", True, True)
def test_number_revisions(self):
with self.login_user_context(self.user):
self.assertEqual(Revision.objects.all().count(), 0)
self.page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 1)
class ReversionTestCase(TransactionCMSTestCase):
def setUp(self):
u = self._create_user("test", True, True)
with self.login_user_context(u):
# add a new text plugin
self.page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE)
page = Page.objects.all()[0]
placeholderpk = page.placeholders.get(slot="body").pk
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholderpk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content.decode('utf8').split("edit-plugin/")[1].split("/")[
0] + "/"
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(edit_url, {"body": "Hello World"})
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual("Hello World", txt.body)
self.txt = txt
# change the content
response = self.client.post(edit_url, {"body": "Bye Bye World"})
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual("Bye Bye World", txt.body)
p_data = self.page_data.copy()
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk, p_data)
self.assertRedirects(response, URL_CMS_PAGE)
page.publish('en')
self.user = u
def test_revert(self):
"""
Test that you can revert a plugin
"""
with self.login_user_context(self.user):
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[2]
version = Version.objects.get(content_type=ctype, revision=revision)
page = Page.objects.all()[0]
history_url = URL_CMS_PAGE_CHANGE % (page.pk) + "history/"
response = self.client.get(history_url)
self.assertEqual(response.status_code, 200)
revert_url = history_url + "%s/" % version.pk
response = self.client.get(revert_url)
self.assertEqual(response.status_code, 200)
response = self.client.post("%s?language=en&" % revert_url, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE_CHANGE % page.pk)
# test for publisher_is_draft, published is set for both draft and
# published page
self.assertEqual(Page.objects.all()[0].publisher_is_draft, True)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
# test that CMSPlugin subclasses are reverted
self.assertEqual(Text.objects.all().count(), 2)
self.assertEqual(Text.objects.get(pk=self.txt.pk).body, "Hello World")
self.assertEqual(Revision.objects.all().count(), 6)
def test_undo_redo(self):
"""
Test that you can revert a plugin
"""
with self.login_user_context(self.user):
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 5)
self.assertEqual(Placeholder.objects.count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[2]
Version.objects.get(content_type=ctype, revision=revision)
page = Page.objects.all()[0]
undo_url = admin_reverse("cms_page_undo", args=[page.pk])
response = self.client.post(undo_url)
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertTrue(page.revision_id != 0)
rev = page.revision_id
redo_url = admin_reverse("cms_page_redo", args=[page.pk])
response = self.client.post(redo_url)
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertTrue(page.revision_id != rev)
txt = Text.objects.all()[0]
edit_url = URL_CMS_PLUGIN_EDIT + str(txt.pk) + "/"
response = self.client.post(edit_url, {"body": "Hello World2"})
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertEqual(page.revision_id, 0)
self.assertEqual(2, CMSPlugin.objects.all().count())
placeholderpk = page.placeholders.filter(slot="body")[0].pk
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholderpk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content.decode('utf8').split("edit-plugin/")[1].split("/")[
0] + "/"
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(edit_url, {"body": "Hello World"})
self.assertEqual(response.status_code, 200)
self.assertEqual(3, CMSPlugin.objects.all().count())
self.client.post(undo_url)
self.client.post(undo_url)
self.assertEqual(2, CMSPlugin.objects.all().count())
self.assertEqual(Placeholder.objects.count(), 5)
def test_undo_slug_collision(self):
data1 = self.get_new_page_data()
data2 = self.get_new_page_data()
data1['slug'] = 'page1'
data2['slug'] = 'page2'
with self.login_user_context(self.get_superuser()):
response = self.client.post(URL_CMS_PAGE_ADD, data1)
self.assertEqual(response.status_code, 302)
response = self.client.post(URL_CMS_PAGE_ADD, data2)
self.assertEqual(response.status_code, 302)
page1 = Page.objects.get(title_set__slug='page1')
page2 = Page.objects.get(title_set__slug='page2')
data1['slug'] = 'page3'
response = self.client.post(URL_CMS_PAGE_CHANGE % page1.pk, data1)
self.assertEqual(response.status_code, 302)
data2['slug'] = 'page1'
response = self.client.post(URL_CMS_PAGE_CHANGE % page2.pk, data2)
self.assertEqual(response.status_code, 302)
undo_url = admin_reverse("cms_page_undo", args=[page1.pk])
response = self.client.post(undo_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(Title.objects.get(page=page1).slug, 'page3')
response = self.client.get(admin_reverse("cms_page_changelist"))
self.assertEqual(response.status_code, 200)
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
response = self.client.get('/en/page1/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
def test_recover(self):
"""
Test that you can recover a page
"""
with self.login_user_context(self.user):
self.assertEqual(Revision.objects.all().count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[4]
version = Version.objects.filter(content_type=ctype, revision=revision)[0]
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Text.objects.all().count(), 2)
page = Page.objects.all()[0]
page_pk = page.pk
page.delete()
self.assertEqual(Page.objects.all().count(), 0)
self.assertEqual(CMSPlugin.objects.all().count(), 0)
self.assertEqual(Text.objects.all().count(), 0)
recover_url = URL_CMS_PAGE + "recover/"
response = self.client.get(recover_url)
self.assertEqual(response.status_code, 200)
recover_url += "%s/" % version.pk
response = self.client.get(recover_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(recover_url, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE_CHANGE % page_pk)
self.assertEqual(Page.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.all().count(), 1)
# test that CMSPlugin subclasses are recovered
self.assertEqual(Text.objects.all().count(), 1)
def test_publish_limits(self):
with self.login_user_context(self.user):
with SettingsOverride(CMS_MAX_PAGE_PUBLISH_REVERSIONS=2, CMS_MAX_PAGE_HISTORY_REVERSIONS=2):
page = Page.objects.all()[0]
page_pk = page.pk
self.assertEqual(Revision.objects.all().count(), 5)
for x in range(10):
publish_url = URL_CMS_PAGE + "%s/en/publish/" % page_pk
response = self.client.get(publish_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Revision.objects.all().count(), 4)
class ReversionFileFieldTests(CMSTestCase):
def tearDown(self):
shutil.rmtree(join(settings.MEDIA_ROOT, 'fileapp'))
def test_file_persistence(self):
content = b'content1'
with reversion.create_revision():
# add a file instance
file1 = FileModel()
file1.test_file.save('file1.txt', SimpleUploadedFile('file1.txt', content), False)
file1.save()
# manually add a revision because we use the same explicit approach
# that django-cms uses.
adapter = reversion.get_adapter(FileModel)
if hasattr(reversion.models, 'VERSION_CHANGE'):
reversion.revision_context_manager.add_to_context(
reversion.default_revision_manager, file1,
adapter.get_version_data(file1, VERSION_CHANGE))
else:
reversion.revision_context_manager.add_to_context(
reversion.default_revision_manager, file1,
adapter.get_version_data(file1))
# reload the instance from db
file2 = FileModel.objects.all()[0]
# delete the instance.
file2.delete()
# revert the old version
file_version = reversion.get_for_object(file1)[0]
file_version.revert()
# reload the reverted instance and check for its content
file1 = FileModel.objects.all()[0]
self.assertEqual(file1.test_file.file.read(), content)
|
PYSEC-2017-11
|
django/core/files/storage.py
|
@@ -1,12 +1,12 @@
import os
import errno
-import itertools
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import locks, File
from django.core.files.move import file_move_safe
+from django.utils.crypto import get_random_string
from django.utils.encoding import force_text, filepath_to_uri
from django.utils.functional import LazyObject
from django.utils.module_loading import import_by_path
@@ -67,13 +67,12 @@ def get_available_name(self, name):
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
- # If the filename already exists, add an underscore and a number (before
- # the file extension, if one exists) to the filename until the generated
- # filename doesn't exist.
- count = itertools.count(1)
+ # If the filename already exists, add an underscore and a random 7
+ # character alphanumeric string (before the file extension, if one
+ # exists) to the filename until the generated filename doesn't exist.
while self.exists(name):
# file_ext includes the dot.
- name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext))
+ name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
return name
|
import os
import errno
import itertools
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import locks, File
from django.core.files.move import file_move_safe
from django.utils.encoding import force_text, filepath_to_uri
from django.utils.functional import LazyObject
from django.utils.module_loading import import_by_path
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.text import get_valid_filename
from django.utils._os import safe_join, abspathu
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content)
name = self.get_available_name(name)
name = self._save(name, content)
# Store filenames with forward slashes, even on Windows
return force_text(name.replace('\\', '/'))
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a number (before
# the file extension, if one exists) to the filename until the generated
# filename doesn't exist.
count = itertools.count(1)
while self.exists(name):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext))
return name
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError()
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError()
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError()
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError()
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError()
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None):
if location is None:
location = settings.MEDIA_ROOT
self.base_location = location
self.location = abspathu(self.base_location)
if base_url is None:
base_url = settings.MEDIA_URL
self.base_url = base_url
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
# Note that there is a race between os.path.exists and os.makedirs:
# if os.makedirs fails with EEXIST, the directory was created
# concurrently, and we can continue normally. Refs #16082.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
content.close()
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except OSError as e:
if e.errno == errno.EEXIST:
# Ooops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if settings.FILE_UPLOAD_PERMISSIONS is not None:
os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)
return name
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file exists, delete it from the filesystem.
# Note that there is a race between os.path.exists and os.remove:
# if os.remove fails with ENOENT, the file was removed
# concurrently, and we can continue normally.
if os.path.exists(name):
try:
os.remove(name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
try:
path = safe_join(self.location, name)
except ValueError:
raise SuspiciousFileOperation("Attempted access to '%s' denied." % name)
return os.path.normpath(path)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
return urljoin(self.base_url, filepath_to_uri(name))
def accessed_time(self, name):
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
return import_by_path(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
|
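The PYSEC-2017-11 patch above replaces the sequential _1, _2, ... suffixes produced by get_available_name() with a random 7-character alphanumeric suffix drawn from django.utils.crypto.get_random_string. A minimal standalone sketch of the same idea follows (standard library only; get_random_string here and the "existing" set are simplified stand-ins for the Django helper and Storage.exists(), so this is illustrative rather than the Django implementation):

# Minimal sketch of the patched behaviour, standard library only.
# "existing" stands in for Storage.exists(); get_random_string() is a
# simplified substitute for django.utils.crypto.get_random_string.
import os
import secrets
import string

ALPHANUMERIC = string.ascii_letters + string.digits

def get_random_string(length=7, allowed_chars=ALPHANUMERIC):
    return ''.join(secrets.choice(allowed_chars) for _ in range(length))

def get_available_name(name, existing):
    dir_name, file_name = os.path.split(name)
    file_root, file_ext = os.path.splitext(file_name)
    # Instead of counting upwards (conflict, conflict_1, conflict_2, ...),
    # append a random 7-character suffix until the name is free.
    while name in existing:
        name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(), file_ext))
    return name

existing = {"uploads/report.txt"}
print(get_available_name("uploads/report.txt", existing))
# e.g. uploads/report_k3Fz9Qa.txt -- unpredictable, and found in O(1) expected tries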
GHSA-296w-6qhq-gf92
|
tests/file_storage/tests.py
|
@@ -35,6 +35,9 @@
Image = None
+FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
+
+
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
@@ -430,10 +433,9 @@ def test_race_condition(self):
self.thread.start()
name = self.save_file('conflict')
self.thread.join()
- self.assertTrue(self.storage.exists('conflict'))
- self.assertTrue(self.storage.exists('conflict_1'))
- self.storage.delete('conflict')
- self.storage.delete('conflict_1')
+ files = sorted(os.listdir(self.storage_dir))
+ self.assertEqual(files[0], 'conflict')
+ six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
@@ -477,9 +479,10 @@ def test_directory_with_dot(self):
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
+ files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
- self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
- self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1')))
+ self.assertEqual(files[0], 'test')
+ six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
@@ -489,8 +492,10 @@ def test_first_character_dot(self):
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
- self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
- self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1')))
+ files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
+ self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
+ self.assertEqual(files[0], '.test')
+ six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class DimensionClosingBug(unittest.TestCase):
"""
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import time
import zlib
from datetime import datetime, timedelta
from io import BytesIO
try:
import threading
except ImportError:
import dummy_threading as threading
from django.conf import settings
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.files.base import File, ContentFile
from django.core.files.images import get_image_dimensions
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.test import LiveServerTestCase, SimpleTestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils import unittest
from django.utils.six.moves.urllib.request import urlopen
from django.utils._os import upath
try:
from django.utils.image import Image
except ImproperlyConfigured:
Image = None
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import don't exist.
"""
with six.assertRaisesRegex(self, ImproperlyConfigured,
"Error importing module storage: \"No module named '?storage'?\""):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
get_storage_class raises an error if the requested class don't exist.
"""
self.assertRaisesMessage(
ImproperlyConfigured,
'Module "django.core.files.storage" does not define a '
'"NonExistingStorage" attribute/class',
get_storage_class,
'django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
get_storage_class raises an error if the requested module don't exist.
"""
# Error message may or may not be the fully qualified path.
with six.assertRaisesRegex(self, ImproperlyConfigured,
"Error importing module django.core.files.non_existing_storage: "
"\"No module named '?(django.core.files.)?non_existing_storage'?\""):
get_storage_class(
'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_emtpy_location(self):
"""
Makes sure an empty location falls back to the current working directory
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.accessed_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.created_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.modified_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file',
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
# should encode special chars except ~!*()'
# like the encodeURIComponent() JavaScript function does
self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
"""/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""),
"""/test_media_url/a/b/c.file""")
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
f = self.storage.save('storage_test_1', ContentFile('custom content'))
f = self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), set(['storage_dir_1']))
self.assertEqual(set(files),
set(['storage_test_1', 'storage_test_2']))
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
temp_storage.path(mixed_case))
temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file',
ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file',
ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
self.assertRaises(OSError,
self.storage.save, 'error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
self.assertFalse(self.storage.exists('normal.file'))
# Check that OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
self.assertRaises(OSError, self.storage.delete, 'error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behaviour when file.chunks() is raising an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
class CustomStorage(FileSystemStorage):
def get_available_name(self, name):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class UnicodeFileNameTests(unittest.TestCase):
def test_unicode_file_names(self):
"""
Regression test for #8156: files with unicode names. I can't quite figure
out the encoding situation between doctest and this file, but the actual
repr doesn't matter; it just shouldn't return a unicode object.
"""
uf = UploadedFile(name='¿Cómo?',content_type='text')
self.assertEqual(type(uf.__repr__()), str)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super(ContentFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
name = self.save_file('conflict')
self.thread.join()
self.assertTrue(self.storage.exists('conflict'))
self.assertTrue(self.storage.exists('conflict_1'))
self.storage.delete('conflict')
self.storage.delete('conflict_1')
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1')))
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1')))
class DimensionClosingBug(unittest.TestCase):
"""
Test that get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow/PIL not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = BytesIO()
try:
get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow/PIL not installed")
def test_closing_of_filenames(self):
"""
get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks whether the file was closed properly when the function is
# called with a filename instead of a file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper(object):
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
from django.core.files import images
images.open = catching_open
try:
get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png"))
finally:
del images.open
self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
Test that get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "Pillow/PIL not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
from django.core.files.images import ImageFile
img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
image = ImageFile(open(img_path, 'rb'))
image_pil = Image.open(img_path)
size_1, size_2 = get_image_dimensions(image), get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
@unittest.skipUnless(Image, "Pillow/PIL not installed")
def test_bug_19457(self):
"""
Regression test for #19457
get_image_dimensions fails on some PNGs, while Image.size works fine on them
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png")
try:
size = get_image_dimensions(img_path)
except zlib.error:
self.fail("Exception raised from get_image_dimensions().")
self.assertEqual(size, Image.open(img_path).size)
class ContentFileTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_file_default_name(self):
self.assertEqual(ContentFile(b"content").name, None)
def test_content_file_custom_name(self):
"""
Test that the constructor of ContentFile accepts 'name' (#16590).
"""
name = "I can have a name too!"
self.assertEqual(ContentFile(b"content", name=name).name, name)
def test_content_file_input_type(self):
"""
Test that ContentFile can accept both bytes and unicode and that the
retrieved content is of the same type.
"""
self.assertIsInstance(ContentFile(b"content").read(), bytes)
if six.PY3:
self.assertIsInstance(ContentFile("español").read(), six.text_type)
else:
self.assertIsInstance(ContentFile("español").read(), bytes)
def test_content_saving(self):
"""
Test that ContentFile can be saved correctly with the filesystem storage,
whether it was initialized with bytes or unicode content"""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
class NoNameFileTestCase(unittest.TestCase):
"""
Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
urllib.urlopen()
"""
def test_noname_file_default_name(self):
self.assertEqual(File(BytesIO(b'A file with no name')).name, None)
def test_noname_file_get_size(self):
self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
urls = 'file_storage.urls'
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
Test the File storage API with a file-like object coming from urllib2.urlopen()
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
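Because the generated suffix is now random, the GHSA-296w-6qhq-gf92 test changes above replace exact-name assertions such as 'conflict_1' and 'test_1' with regex assertions against FILE_SUFFIX_REGEX. A small illustration of the same assertion pattern outside Django follows (plain unittest with Python 3's assertRegex instead of six.assertRegex; the file names are made-up examples, not output of the Django storage backend):

# Sketch of the assertion style used by the patched tests: match the random
# 7-character alphanumeric suffix with a regex instead of expecting "_1".
import unittest

FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'

class RandomSuffixAssertionExample(unittest.TestCase):
    def test_suffix_matches_pattern(self):
        # Hypothetical names, as a patched get_available_name() might produce.
        files = sorted(['conflict', 'conflict_k3Fz9Qa'])
        self.assertEqual(files[0], 'conflict')
        self.assertRegex(files[1], r'conflict_%s$' % FILE_SUFFIX_REGEX)

if __name__ == '__main__':
    unittest.main()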
GHSA-296w-6qhq-gf92
|
tests/files/tests.py
|
@@ -13,12 +13,15 @@
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.temp import NamedTemporaryFile
from django.test import TestCase
-from django.utils import unittest
+from django.utils import six, unittest
from django.utils.six import StringIO
from .models import Storage, temp_storage, temp_storage_location
+FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
+
+
class FileStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
@@ -64,27 +67,28 @@ def test_files(self):
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
- self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
+ obj2_name = obj2.normal.name
+ six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
# Push the objects into the cache to make sure they pickle properly
cache.set("obj1", obj1)
cache.set("obj2", obj2)
- self.assertEqual(cache.get("obj2").normal.name, "tests/django_test_1.txt")
+ six.assertRegex(self, cache.get("obj2").normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
- self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")
+ self.assertNotEqual(obj2_name, obj2.normal.name)
+ six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
# Multiple files with the same name get _N appended to them.
- objs = [Storage() for i in range(3)]
+ objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
- self.assertEqual(
- [o.normal.name for o in objs],
- ["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
- )
+ names = [o.normal.name for o in objs]
+ self.assertEqual(names[0], "tests/multiple_files.txt")
+ six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
for o in objs:
o.delete()
|
from __future__ import absolute_import
from io import BytesIO
import os
import gzip
import shutil
import tempfile
from django.core.cache import cache
from django.core.files import File
from django.core.files.move import file_move_safe
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.temp import NamedTemporaryFile
from django.test import TestCase
from django.utils import unittest
from django.utils.six import StringIO
from .models import Storage, temp_storage, temp_storage_location
class FileStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def test_files(self):
temp_storage.save('tests/default.txt', ContentFile('default content'))
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertEqual(sorted(files), ["default.txt", "django_test.txt"])
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(
sorted(files), ["assignment.txt", "default.txt", "django_test.txt"]
)
# Files can be read in a little at a time, if necessary.
obj1.normal.open()
self.assertEqual(obj1.normal.read(3), b"con")
self.assertEqual(obj1.normal.read(), b"tent")
self.assertEqual(list(obj1.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj1.normal.close()
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
self.assertEqual(obj2.normal.size, 12)
# Push the objects into the cache to make sure they pickle properly
cache.set("obj1", obj1)
cache.set("obj2", obj2)
self.assertEqual(cache.get("obj2").normal.name, "tests/django_test_1.txt")
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")
# Multiple files with the same name get _N appended to them.
objs = [Storage() for i in range(3)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
self.assertEqual(
[o.normal.name for o in objs],
["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
)
for o in objs:
o.delete()
# Default values allow an object to access a single file.
obj3 = Storage.objects.create()
self.assertEqual(obj3.default.name, "tests/default.txt")
self.assertEqual(obj3.default.read(), b"default content")
obj3.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj3.delete()
obj3 = Storage()
self.assertEqual(obj3.default.read(), b"default content")
obj3.default.close()
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj4 = Storage()
obj4.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj4.random.name.endswith("/random_file"))
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
class FileTests(unittest.TestCase):
def test_context_manager(self):
orig_file = tempfile.TemporaryFile()
base_file = File(orig_file)
with base_file as f:
self.assertIs(base_file, f)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
self.assertTrue(orig_file.closed)
def test_namedtemporaryfile_closes(self):
"""
The symbol django.core.files.NamedTemporaryFile is assigned as
a different class on different operating systems. In
any case, the result should minimally mock some of the API of
tempfile.NamedTemporaryFile from the Python standard library.
"""
tempfile = NamedTemporaryFile()
self.assertTrue(hasattr(tempfile, "closed"))
self.assertFalse(tempfile.closed)
tempfile.close()
self.assertTrue(tempfile.closed)
def test_file_mode(self):
# Should not set mode to None if it is not present.
# See #14681, stdlib gzip module crashes if mode is set to None
file = SimpleUploadedFile("mode_test.txt", b"content")
self.assertFalse(hasattr(file, 'mode'))
g = gzip.GzipFile(fileobj=file)
def test_file_iteration(self):
"""
File objects should yield lines when iterated over.
Refs #22107.
"""
file = File(BytesIO(b'one\ntwo\nthree'))
self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])
class FileMoveSafeTests(unittest.TestCase):
def test_file_move_overwrite(self):
handle_a, self.file_a = tempfile.mkstemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
handle_b, self.file_b = tempfile.mkstemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
# file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
self.assertRaises(IOError, lambda: file_move_safe(self.file_a, self.file_b, allow_overwrite=False))
# should allow it and continue on if allow_overwrite is True
self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))
os.close(handle_a)
os.close(handle_b)
|
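The reason both test modules above drop the '_1', '_2', ... expectations is visible in the old get_available_name() loop: with a sequential counter, the N-th upload of a conflicting name performs roughly N exists() probes, so N uploads cost on the order of N^2 lookups, and the next generated name is trivially predictable. A rough back-of-the-envelope comparison follows (pure Python, simulating exists() with set membership; the printed numbers are illustrative only, not measurements of Django itself):

# Rough cost comparison between the old counter-based scheme and the
# patched random-suffix scheme. exists() is simulated by set membership.
import itertools
import secrets
import string

def sequential_uploads(n):
    taken, probes = set(), 0
    for _ in range(n):
        name, count = 'conflict', itertools.count(1)
        while name in taken:
            probes += 1
            name = 'conflict_%d' % next(count)
        taken.add(name)
    return probes

def random_uploads(n):
    taken, probes = set(), 0
    alphabet = string.ascii_letters + string.digits
    for _ in range(n):
        name = 'conflict'
        while name in taken:
            probes += 1
            suffix = ''.join(secrets.choice(alphabet) for _ in range(7))
            name = 'conflict_%s' % suffix
        taken.add(name)
    return probes

print(sequential_uploads(1000))  # ~500k probes, and every generated name is predictable
print(random_uploads(1000))      # ~1000 probes (random collisions are negligible)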
GHSA-296w-6qhq-gf92
|
tensorflow/python/kernel_tests/collective_ops_test.py
|
@@ -1182,6 +1182,69 @@ def f():
self.assertAllEqual(self.evaluate(f()), [[3.], [3.]])
+@combinations.generate(
+ combinations.times(
+ combinations.combine(collective_op=[
+ combinations.NamedObject('all_reduce_v2',
+ CollectiveOpsV2.all_reduce),
+ combinations.NamedObject('all_gather_v2',
+ CollectiveOpsV2.all_gather)
+ ]), device_combination))
+class InvalidInputTest(test.TestCase, parameterized.TestCase):
+
+ def setUp(self):
+ _setup_context()
+ super().setUp()
+
+ def testInvalidGroupKey(self, collective_op, device, communication):
+ dev0 = '/device:%s:0' % device
+ group_size = 2
+ group_key = [100]
+ instance_key = 100
+ in_tensor = constant_op.constant([1.])
+
+ with self.assertRaises(errors.InvalidArgumentError):
+ with ops.device(dev0):
+ collective_op(
+ in_tensor,
+ group_size,
+ group_key,
+ instance_key,
+ communication_hint=communication)
+
+ def testInvalidGroupSize(self, collective_op, device, communication):
+ dev0 = '/device:%s:0' % device
+ group_size = -2
+ group_key = 100
+ instance_key = 100
+ in_tensor = constant_op.constant([1.])
+
+ with self.assertRaises(errors.InvalidArgumentError):
+ with ops.device(dev0):
+ collective_op(
+ in_tensor,
+ group_size,
+ group_key,
+ instance_key,
+ communication_hint=communication)
+
+ def testInvalidInstanceKey(self, collective_op, device, communication):
+ dev0 = '/device:%s:0' % device
+ group_size = 2
+ group_key = 100
+ instance_key = [100]
+ in_tensor = constant_op.constant([1.])
+
+ with self.assertRaises(errors.InvalidArgumentError):
+ with ops.device(dev0):
+ collective_op(
+ in_tensor,
+ group_size,
+ group_key,
+ instance_key,
+ communication_hint=communication)
+
+
class CollectiveOpsV3Test(test.TestCase, parameterized.TestCase):
def setUp(self):
|
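The TensorFlow patch above adds an InvalidInputTest asserting that malformed arguments (a list-valued group_key, a negative group_size, a list-valued instance_key) are rejected with InvalidArgumentError instead of crashing or hanging the runtime. A minimal sketch of the same check follows (assuming a TensorFlow build that already contains this validation, running eagerly on CPU; it mirrors the added test rather than the framework's internal fix):

# Sketch only: assumes a TensorFlow build containing the validation added by
# the patch above, running eagerly on CPU. A list is not a scalar group_key,
# so the collective op should fail fast with InvalidArgumentError.
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.ops import collective_ops

in_tensor = tf.constant([1.])
with tf.device('/device:CPU:0'):
    try:
        collective_ops.all_reduce_v2(in_tensor, 2, [100], 100)
    except errors.InvalidArgumentError as e:
        print('rejected as expected:', e)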
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
broadcast_send = _collective_ops.broadcast_send
broadcast_recv = _collective_ops.broadcast_recv
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def broadcast_send(t, shape, dtype, group_size, group_key, instance_key,
*args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.broadcast_send_v2(t, group_size, group_key,
instance_key, *args, **kwargs)
@staticmethod
def broadcast_recv(shape, dtype, group_size, group_key, instance_key, *args,
**kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
shape = array_ops.identity(shape)
return _collective_ops.broadcast_recv_v2(
shape, dtype, group_size, group_key, instance_key, *args, **kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
collective_op_combinations = combinations.combine(collective_op=[
combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather)
])
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testBroadcast(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_broadcast_2devices():
shape = [3]
in_value = constant_op.constant([1., 2., 3.], shape=shape)
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.broadcast_send(
in_value,
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.broadcast_recv(
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
for result in run_broadcast_2devices():
self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key=100,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key=200,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testCollectiveInvalidKey(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with self.assertRaisesRegex(
errors.InternalError, 'instance 100 expected type 0 and data_type 1 but'
' got type 2 and data_type 1'):
with ops.device(dev0):
collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager',
max_subdivs_per_device=[-1, 0, 16]), device_combination))
class AllReduceWithSubdivisionsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication,
max_subdivs_per_device):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
if max_subdivs_per_device == -1:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
else:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication,
max_subdivs_per_device=max_subdivs_per_device)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
@combinations.generate(
combinations.combine(required_physical_gpus=2, mode='eager'))
class XlaTest(test.TestCase, parameterized.TestCase):
def testReduce(self):
device0 = '/device:GPU:0'
device1 = '/device:GPU:1'
group_size = 2
group_key = 100
instance_key = 100
results = []
def all_reduce(device):
@def_function.function(jit_compile=True)
def f():
return _collective_ops.all_reduce_v2([1.], group_size, group_key,
instance_key)
with ops.device(device):
results.append(f())
t0 = threading.Thread(target=all_reduce, args=(device0,))
t1 = threading.Thread(target=all_reduce, args=(device1,))
t0.start()
t1.start()
t0.join()
t1.join()
self.assertAllEqual(results, [[2.], [2.]])
@combinations.generate(
combinations.times(collective_op_combinations, device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
class OpCancellationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortIfNoCollective(self, collective_op, device,
communication):
# Do not abort if there are no active collective ops. There could be
# exceptions like EOF which we expect users to catch; aborting collective
# ops on all op errors would interfere with this workflow.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
dataset = dataset_ops.Dataset.from_tensors([1.])
@def_function.function
def collective_fn(in_tensor):
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def f():
iterator = iter(dataset)
collective_fn(next(iterator))
# This next(iterator) should raise EOF.
collective_fn(next(iterator))
with self.assertRaises(errors.OutOfRangeError):
f()
collective_fn(constant_op.constant([1.]))
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
],
mode='eager'), device_combination))
def testOpErrorAbortWithCollective(self, collective_op, device,
communication):
    # Abort v1 collective ops if there are active collective ops at the time of
    # an op error. This is because v1 collective ops cannot be cancelled, and op
    # errors may cause running collective ops to hang.
dev0 = '/device:%s:0' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test abortion
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Now collective ops is aborted, subsequent collective ops should fail with
# the previous error.
with self.assertRaises(errors.CancelledError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortWithCollective(self, collective_op, device,
communication):
    # Do not abort v2 collective ops even if there are active collective ops at
    # the time of an op error. We rely on cancellation to terminate active
    # collective ops.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
@def_function.function
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Local params resolution cannot be cancelled yet, so we perform a normal
# collective so that the group is resolved.
collective_fn()
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test cancellation
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Collective ops shouldn't be aborted and new collectives should be able to
# proceed.
collective_fn()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testCancelDuringParamResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
t1_cancellation_manager = cancellation.CancellationManager()
t2_cancellation_manager = cancellation.CancellationManager()
@def_function.function
def _collective_fn(x):
# Run an assertion to crash one of the two function executions running
# collectives. We explicitly cancel the other in response.
assert_op = check_ops.assert_equal(x, in_tensor)
with ops.control_dependencies([assert_op]):
return collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
collective_concrete = _collective_fn.get_concrete_function(in_tensor)
finish_mu = threading.Lock()
finishes = 0
def _placement_wrapper(device, x, my_cancellation, other_cancellation):
try:
with ops.device(device):
cancelable_collective = my_cancellation.get_cancelable_function(
collective_concrete)
return cancelable_collective(x)
except errors.InvalidArgumentError:
# `assert_equal` failed for this execution of the function. The other
# function would deadlock without cancellation.
other_cancellation.start_cancel()
except errors.CancelledError:
pass
nonlocal finishes
with finish_mu:
finishes += 1
t1 = threading.Thread(
target=_placement_wrapper,
args=(dev0, constant_op.constant([1.]), t1_cancellation_manager,
t2_cancellation_manager))
t2 = threading.Thread(
target=_placement_wrapper,
# Will cause the assertion to fail
args=(dev1, constant_op.constant([2.]), t2_cancellation_manager,
t1_cancellation_manager))
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(finishes, 2)
@combinations.generate(
combinations.times(collective_op_combinations, device_combination))
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
    # This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
class CommunicationHintTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(collective_op_combinations,
combinations.combine(required_gpus=[0, 1])))
def testNCCLFallbackOnCPU(self, collective_op):
    # communication_hint=NCCL should work on CPU by falling back to RING. The
    # test doesn't actually require a GPU, only a GPU build. We specify
    # required_gpus=1 so that it's tested with GPU builds.
dev0 = '/device:CPU:0'
dev1 = '/device:CPU:1'
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint='NCCL')
run()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class OrderingTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testOrdering(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with ops.device(dev0):
token0 = resource_variable_ops.ResourceVariable(0.)
with ops.device(dev1):
token1 = resource_variable_ops.ResourceVariable(0.)
@def_function.function
def f():
# Launch the first collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
# Launch the second collective without token.
with ops.device(dev0):
collective_op(in_tensor, group_size, group_key, instance_key)
with ops.device(dev1):
collective_op(in_tensor, group_size, group_key, instance_key)
# Launch the third collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
graph = f.get_concrete_function().graph
for device in [dev0, dev1]:
# Try to find the third collective, which should have the first collective
# as a control input.
third = None
for op in graph.get_operations():
if (op.type.startswith('Collective') and op.device.endswith(device) and
op.control_inputs and
op.control_inputs[0].type.startswith('Collective')):
self.assertIsNone(third)
third = op
self.assertIsNotNone(third)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs))
first = third.control_inputs[0]
self.assertEqual(third.device, first.device)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs))
self.assertEmpty(first.control_inputs)
class InputPipelineTest(test.TestCase):
def setUp(self):
super().setUp()
_setup_context()
def testMap(self):
group_size = 2
group_key = 100
instance_key = 100
def create_dataset_and_fetch_one(t):
dataset = dataset_ops.Dataset.from_tensor_slices([t])
def reduce_fn(t):
return CollectiveOpsV2.all_reduce(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key)
dataset = dataset.map(reduce_fn)
return next(iter(dataset))
@def_function.function
def f():
with ops.device('CPU:0'):
value0 = create_dataset_and_fetch_one([1.])
with ops.device('CPU:1'):
value1 = create_dataset_and_fetch_one([2.])
return value0, value1
self.assertAllEqual(self.evaluate(f()), [[3.], [3.]])
class CollectiveOpsV3Test(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
_setup_context()
def testGroupInitialization(self):
group_size = 2
group_key = 100
@def_function.function
def f():
with ops.device('CPU:0'):
_collective_ops.initialize_communicator(
group_key=group_key, rank=0, group_size=group_size)
with ops.device('CPU:1'):
_collective_ops.initialize_communicator(
group_key=group_key, rank=1, group_size=group_size)
# TODO(b/193864859): Add validation with reduction op.
self.evaluate(f())
@combinations.generate(device_combination)
def testAllReduceV3(self, device, communication):
group_size = 2
group_key = 101
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_reduce_v3(
group_handle0, [1.0], reduction='Add'))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_reduce_v3(
group_handle1, [2.0], reduction='Add'))
return collectives
for result in run_all_reduce_2devices():
self.assertAllClose(result, [3.], rtol=1e-5, atol=1e-5)
@combinations.generate(device_combination)
def testAllToAllV3(self, device, communication):
group_size = 2
group_key = 104
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_to_all_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle0, [1.0, 3.0]))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle1, [2.0, 4.0]))
return collectives
result = run_all_to_all_2devices()
self.assertAllClose(result[0], [1.0, 2.0], rtol=1e-5, atol=1e-5)
self.assertAllClose(result[1], [3.0, 4.0], rtol=1e-5, atol=1e-5)
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
os.environ['NCCL_DEBUG'] = 'INFO'
v2_compat.enable_v2_behavior()
test.main()
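# An illustrative sketch (not part of the test file above) of the abort-and-reset
# pattern these tests exercise: abort_collective_ops() fails the pending and any
# subsequent collectives, and resetting the context rebuilds the collective
# executor so later collectives can succeed again. Assumes the same context,
# errors, test_util, threading and time imports used throughout this file.
def _abort_and_reset_sketch(launch_hanging_collective):
  def abort_fn():
    # Give the hanging collective time to launch before aborting it.
    time.sleep(2)
    context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
  t = threading.Thread(target=abort_fn)
  t.start()
  try:
    # Expected to fail with errors.UnavailableError('peer down').
    launch_hanging_collective()
  except errors.UnavailableError:
    pass
  t.join()
  # Rebuild the collective executor; non-NCCL collectives work again afterwards.
  context._reset_context()  # pylint: disable=protected-access
  test_util.set_logical_devices_to_at_least('CPU', 4)
  context.ensure_initialized()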
|
PYSEC-2021-629
|
tensorflow/python/kernel_tests/math_ops/bincount_op_test.py
|
@@ -366,7 +366,7 @@ def test_sparse_bincount_all_count(self, dtype):
num_rows = 128
size = 1000
n_elems = 4096
- inp_indices = np.random.randint(0, num_rows, (n_elems,))
+ inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
np_out = np.bincount(inp_vals, minlength=size)
@@ -390,7 +390,7 @@ def test_sparse_bincount_all_count_with_weights(self, dtype):
num_rows = 128
size = 1000
n_elems = 4096
- inp_indices = np.random.randint(0, num_rows, (n_elems,))
+ inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
inp_weight = np.random.random((n_elems,))
@@ -415,7 +415,7 @@ def test_sparse_bincount_all_binary(self, dtype):
num_rows = 128
size = 10
n_elems = 4096
- inp_indices = np.random.randint(0, num_rows, (n_elems,))
+ inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
np_out = np.ones((size,))
@@ -440,7 +440,7 @@ def test_sparse_bincount_all_binary_weights(self, dtype):
num_rows = 128
size = 10
n_elems = 4096
- inp_indices = np.random.randint(0, num_rows, (n_elems,))
+ inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
inp_weight = np.random.random((n_elems,))
@@ -532,6 +532,27 @@ def test_size_is_not_scalar(self): # b/206619828
weights=[0, 0],
binary_output=False))
+ def test_sparse_bincount_input_validation(self):
+ np.random.seed(42)
+ num_rows = 128
+ size = 1000
+ n_elems = 4096
+ inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
+ inp_vals = np.random.randint(0, size, (n_elems,))
+
+ # Insert negative index.
+ inp_indices[10, 0] = -2
+
+ with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+ "out of bounds"):
+ self.evaluate(
+ gen_math_ops.sparse_bincount(
+ indices=inp_indices,
+ values=inp_vals,
+ dense_shape=[num_rows],
+ size=size,
+ weights=[]))
+
class RaggedBincountOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
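The updated tests pass indices of shape (n_elems, 1) rather than (n_elems,), matching the SparseTensor convention of a rank-2 [nnz, ndims] indices matrix, and the new test_sparse_bincount_input_validation checks that an out-of-bounds (negative) index is rejected with an error instead of being used to index out of bounds. A minimal sketch of the corrected input construction, reusing the same gen_math_ops.sparse_bincount call as the tests:

import numpy as np
from tensorflow.python.ops import gen_math_ops

num_rows, size, n_elems = 128, 1000, 4096
# One row per nonzero element, one column per sparse dimension (here just one).
inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=np.int64)
# Indices outside [0, num_rows) should now fail validation rather than cause
# an out-of-bounds access.
out = gen_math_ops.sparse_bincount(
    indices=inp_indices,
    values=inp_vals,
    dense_shape=[num_rows],
    size=size,
    weights=[])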
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bincount_ops.bincount."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bincount_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
class BincountTest(test_util.TensorFlowTestCase):
def test_empty(self):
with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([], minlength=5)),
[0, 0, 0, 0, 0])
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([], minlength=1)), [0])
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([], minlength=0)), [])
self.assertEqual(
self.evaluate(
bincount_ops.bincount([], minlength=0, dtype=np.float32)).dtype,
np.float32)
self.assertEqual(
self.evaluate(
bincount_ops.bincount([], minlength=3, dtype=np.float64)).dtype,
np.float64)
def test_values(self):
with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([1, 1, 1, 2, 2, 3])),
[0, 3, 2, 1])
arr = [1, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5]
self.assertAllEqual(
self.evaluate(bincount_ops.bincount(arr)), [0, 5, 4, 3, 2, 1])
arr += [0, 0, 0, 0, 0, 0]
self.assertAllEqual(
self.evaluate(bincount_ops.bincount(arr)), [6, 5, 4, 3, 2, 1])
self.assertAllEqual(self.evaluate(bincount_ops.bincount([])), [])
self.assertAllEqual(self.evaluate(bincount_ops.bincount([0, 0, 0])), [3])
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([5])), [0, 0, 0, 0, 0, 1])
self.assertAllEqual(
self.evaluate(bincount_ops.bincount(np.arange(10000))),
np.ones(10000))
def test_maxlength(self):
with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([5], maxlength=3)), [0, 0, 0])
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([1], maxlength=3)), [0, 1])
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([], maxlength=3)), [])
def test_random_with_weights(self):
num_samples = 10000
with self.session():
np.random.seed(42)
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
arr = np.random.randint(0, 1000, num_samples)
if dtype == dtypes.int32 or dtype == dtypes.int64:
weights = np.random.randint(-100, 100, num_samples)
else:
weights = np.random.random(num_samples)
self.assertAllClose(
self.evaluate(bincount_ops.bincount(arr, weights)),
np.bincount(arr, weights))
def test_random_without_weights(self):
num_samples = 10000
with self.session():
np.random.seed(42)
for dtype in [np.int32, np.float32]:
arr = np.random.randint(0, 1000, num_samples)
weights = np.ones(num_samples).astype(dtype)
self.assertAllClose(
self.evaluate(bincount_ops.bincount(arr, None)),
np.bincount(arr, weights))
@test_util.run_gpu_only
def test_bincount_determinism_error(self):
arr = np.random.randint(0, 1000, size=1000)
with test_util.deterministic_ops(), self.assertRaisesRegex(
errors_impl.UnimplementedError,
"Determinism is not yet supported in GPU implementation of Bincount."):
self.evaluate(bincount_ops.bincount(arr, None, axis=None))
arr = np.random.randint(0, 1000, size=(100, 100))
with test_util.deterministic_ops(), self.assertRaisesRegex(
errors_impl.UnimplementedError,
"Determinism is not yet supported in GPU implementation of "
"DenseBincount."):
self.evaluate(bincount_ops.bincount(arr, None, axis=-1))
def test_zero_weights(self):
with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount(np.arange(1000), np.zeros(1000))),
np.zeros(1000))
def test_negative(self):
# unsorted_segment_sum will only report InvalidArgumentError on CPU
with self.cached_session(), ops.device("/CPU:0"):
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(bincount_ops.bincount([1, 2, 3, -1, 6, 8]))
@test_util.run_in_graph_and_eager_modes
def test_shape_function(self):
# size must be scalar.
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Shape must be rank 0 but is rank 1(?s).*Bincount"):
gen_math_ops.bincount([1, 2, 3, 1, 6, 8], [1], [])
# size must be positive.
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be non-negative"):
gen_math_ops.bincount([1, 2, 3, 1, 6, 8], -5, [])
# if size is a constant then the shape is known.
v1 = gen_math_ops.bincount([1, 2, 3, 1, 6, 8], 5, [])
self.assertAllEqual(v1.get_shape().as_list(), [5])
# if size is a placeholder then the shape is unknown.
with ops.Graph().as_default():
s = array_ops.placeholder(dtype=dtypes.int32)
v2 = gen_math_ops.bincount([1, 2, 3, 1, 6, 8], s, [])
self.assertAllEqual(v2.get_shape().as_list(), [None])
class BincountOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_bincount_all_count(self, dtype):
np.random.seed(42)
size = 1000
inp = np.random.randint(0, size, (4096), dtype=dtype)
np_out = np.bincount(inp, minlength=size)
with test_util.use_gpu():
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.dense_bincount(input=inp, weights=[], size=size)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_bincount_all_count_with_weights(self, dtype):
np.random.seed(42)
size = 1000
inp = np.random.randint(0, size, (4096,), dtype=dtype)
np_weight = np.random.random((4096,))
np_out = np.bincount(inp, minlength=size, weights=np_weight)
with test_util.use_gpu():
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.dense_bincount(
input=inp, weights=np_weight, size=size)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_bincount_all_binary(self, dtype):
np.random.seed(42)
size = 10
inp = np.random.randint(0, size, (4096), dtype=dtype)
np_out = np.ones((size,))
with test_util.use_gpu():
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.dense_bincount(
input=inp, weights=[], size=size, binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_bincount_all_binary_with_weights(self, dtype):
np.random.seed(42)
size = 10
inp = np.random.randint(0, size, (4096,), dtype=dtype)
np_weight = np.random.random((4096,))
np_out = np.ones((size,))
with test_util.use_gpu():
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.dense_bincount(
input=inp, weights=np_weight, size=size, binary_output=True)))
def _test_bincount_col_count(self, num_rows, num_cols, size, dtype):
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate(
[np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
axis=0), (num_rows, size))
with test_util.use_gpu():
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.dense_bincount(input=inp, weights=[], size=size)))
def _test_bincount_col_binary(self, num_rows, num_cols, size, dtype):
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate([
np.where(np.bincount(inp[j, :], minlength=size) > 0, 1, 0)
for j in range(num_rows)
],
axis=0), (num_rows, size))
with test_util.use_gpu():
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.dense_bincount(
input=inp, weights=[], size=size, binary_output=True)))
def _test_bincount_col_count_with_weights(self, num_rows, num_cols, size,
dtype):
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_weight = np.random.random((num_rows, num_cols))
np_out = np.reshape(
np.concatenate([
np.bincount(inp[j, :], weights=np_weight[j, :], minlength=size)
for j in range(num_rows)
],
axis=0), (num_rows, size))
with test_util.use_gpu():
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.dense_bincount(
input=inp, weights=np_weight, size=size)))
def test_col_reduce_basic(self):
with test_util.use_gpu():
v = self.evaluate(
gen_math_ops.dense_bincount(
input=[[1, 2, 3], [0, 3, 2]], weights=[], size=4))
expected_out = [[0., 1., 1., 1.], [1., 0., 1., 1.]]
self.assertAllEqual(expected_out, v)
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_col_reduce_shared_memory(self, dtype):
# num_rows * num_bins less than half of max shared memory.
num_rows = 128
num_cols = 27
size = 10
self._test_bincount_col_count(num_rows, num_cols, size, dtype)
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_col_reduce_global_memory(self, dtype):
# num_rows * num_bins more than half of max shared memory.
num_rows = 128
num_cols = 27
size = 1024
self._test_bincount_col_count(num_rows, num_cols, size, dtype)
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_col_reduce_shared_memory_with_weights(self, dtype):
# num_rows * num_bins less than half of max shared memory.
num_rows = 128
num_cols = 27
size = 100
self._test_bincount_col_count_with_weights(num_rows, num_cols, size, dtype)
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_col_reduce_global_memory_with_weights(self, dtype):
# num_rows * num_bins more than half of max shared memory.
num_rows = 128
num_cols = 27
size = 1024
self._test_bincount_col_count_with_weights(num_rows, num_cols, size, dtype)
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_col_reduce_binary(self, dtype):
num_rows = 128
num_cols = 7
size = 10
self._test_bincount_col_binary(num_rows, num_cols, size, dtype)
def test_invalid_rank(self):
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"at most rank 2"):
with test_util.use_gpu():
self.evaluate(
gen_math_ops.dense_bincount(
input=[[[1, 2, 3], [0, 3, 2]]], weights=[], size=10))
@test_util.run_in_graph_and_eager_modes
def test_size_is_not_scalar(self): # b/206619828
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Shape must be rank 0 but is rank 1"):
self.evaluate(
gen_math_ops.dense_bincount(
input=[0], size=[1, 1], weights=[3], binary_output=False))
class SparseBincountOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_bincount_all_count(self, dtype):
np.random.seed(42)
num_rows = 128
size = 1000
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems,))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
np_out = np.bincount(inp_vals, minlength=size)
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.sparse_bincount(
indices=inp_indices,
values=inp_vals,
dense_shape=[num_rows],
size=size,
weights=[])))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_bincount_all_count_with_weights(self, dtype):
np.random.seed(42)
num_rows = 128
size = 1000
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems,))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
inp_weight = np.random.random((n_elems,))
np_out = np.bincount(inp_vals, minlength=size, weights=inp_weight)
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.sparse_bincount(
indices=inp_indices,
values=inp_vals,
dense_shape=[num_rows],
size=size,
weights=inp_weight)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_bincount_all_binary(self, dtype):
np.random.seed(42)
num_rows = 128
size = 10
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems,))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
np_out = np.ones((size,))
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.sparse_bincount(
indices=inp_indices,
values=inp_vals,
dense_shape=[num_rows],
size=size,
weights=[],
binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_bincount_all_binary_weights(self, dtype):
np.random.seed(42)
num_rows = 128
size = 10
n_elems = 4096
inp_indices = np.random.randint(0, num_rows, (n_elems,))
inp_vals = np.random.randint(0, size, (n_elems,), dtype=dtype)
inp_weight = np.random.random((n_elems,))
np_out = np.ones((size,))
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.sparse_bincount(
indices=inp_indices,
values=inp_vals,
dense_shape=[num_rows],
size=size,
weights=inp_weight,
binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_bincount_col_reduce_count(self, dtype):
num_rows = 128
num_cols = 27
size = 100
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate(
[np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
axis=0), (num_rows, size))
# from_dense will filter out 0s.
inp = inp + 1
    # from_dense will cause OOM on GPU.
with ops.device("/CPU:0"):
inp_sparse = sparse_ops.from_dense(inp)
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.sparse_bincount(
indices=inp_sparse.indices,
values=inp_sparse.values - 1,
dense_shape=inp_sparse.dense_shape,
size=size,
weights=[])))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_sparse_bincount_col_reduce_binary(self, dtype):
num_rows = 128
num_cols = 27
size = 100
np.random.seed(42)
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate([
np.where(np.bincount(inp[j, :], minlength=size) > 0, 1, 0)
for j in range(num_rows)
],
axis=0), (num_rows, size))
# from_dense will filter out 0s.
inp = inp + 1
    # from_dense will cause OOM on GPU.
with ops.device("/CPU:0"):
inp_sparse = sparse_ops.from_dense(inp)
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.sparse_bincount(
indices=inp_sparse.indices,
values=inp_sparse.values - 1,
dense_shape=inp_sparse.dense_shape,
size=size,
weights=[],
binary_output=True)))
@test_util.run_in_graph_and_eager_modes
def test_size_is_not_scalar(self): # b/206619828
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Shape must be rank 0 but is rank 1"):
self.evaluate(
gen_math_ops.sparse_bincount(
indices=[[0], [1]],
values=[0, 0],
dense_shape=[1, 1],
size=[1, 1],
weights=[0, 0],
binary_output=False))
class RaggedBincountOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_bincount_count(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
expected_output = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0,
0], [1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 2, 1]]
self.assertAllEqual(
expected_output,
self.evaluate(
gen_math_ops.ragged_bincount(
splits=x.row_splits, values=x.values, weights=[], size=6)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_bincount_binary(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
expected_output = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0,
0], [1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 1]]
self.assertAllEqual(
expected_output,
self.evaluate(
gen_math_ops.ragged_bincount(
splits=x.row_splits,
values=x.values,
weights=[],
size=6,
binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_bincount_count_with_weights(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
weights = ragged_factory_ops.constant([[], [], [.1, .2, .3], [],
[.2, .5, .6, .3]])
expected_output = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[.2, .3, 0, .1, 0, 0], [0, 0, 0, 0, 0, 0],
[.5, 0, 0, 0, .9, .2]]
self.assertAllClose(
expected_output,
self.evaluate(
gen_math_ops.ragged_bincount(
splits=x.row_splits,
values=x.values,
weights=weights.values,
size=6)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_bincount_count_np(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate(
[np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.ragged_bincount(
splits=x.row_splits, values=x.values, weights=[], size=size)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_bincount_count_np_with_weights(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_weight = np.random.random((num_rows, num_cols))
np_out = np.reshape(
np.concatenate([
np.bincount(inp[j, :], weights=np_weight[j, :], minlength=size)
for j in range(num_rows)
],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.ragged_bincount(
splits=x.row_splits,
values=x.values,
weights=np_weight,
size=size)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_bincount_binary_np_with_weights(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate([
np.where(np.bincount(inp[j, :], minlength=size) > 0, 1, 0)
for j in range(num_rows)
],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
self.assertAllEqual(
np_out,
self.evaluate(
gen_math_ops.ragged_bincount(
splits=x.row_splits,
values=x.values,
weights=[],
size=size,
binary_output=True)))
@test_util.run_in_graph_and_eager_modes
def test_size_is_not_scalar(self): # b/206619828
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Shape must be rank 0 but is rank 1"):
self.evaluate(
gen_math_ops.ragged_bincount(
splits=[0, 0, 1],
values=[1],
size=[1, 1],
weights=[0, 0, 0],
binary_output=False,
name=None))
if __name__ == "__main__":
googletest.main()
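# An illustrative sketch (not part of the test file above) of the per-row
# semantics the DenseBincount tests rely on: a rank-2 input is binned row by
# row, matching np.bincount applied independently to each row. Assumes the
# same gen_math_ops import used above.
def _dense_bincount_sketch():
  # Expected result: [[0., 1., 1., 1.], [1., 0., 1., 1.]] -- one histogram per row.
  return gen_math_ops.dense_bincount(
      input=[[1, 2, 3], [0, 3, 2]], weights=[], size=4)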
|
GHSA-397c-5g2j-qxpv
|
synapse/rest/media/v1/_base.py
|
@@ -29,7 +29,7 @@
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
-from synapse.util.stringutils import is_ascii
+from synapse.util.stringutils import is_ascii, parse_and_validate_server_name
logger = logging.getLogger(__name__)
@@ -51,6 +51,19 @@
def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
+ """Parses the server name, media ID and optional file name from the request URI
+
+ Also performs some rough validation on the server name.
+
+ Args:
+ request: The `Request`.
+
+ Returns:
+ A tuple containing the parsed server name, media ID and optional file name.
+
+ Raises:
+ SynapseError(404): if parsing or validation fail for any reason
+ """
try:
# The type on postpath seems incorrect in Twisted 21.2.0.
postpath: List[bytes] = request.postpath # type: ignore
@@ -62,6 +75,9 @@ def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
server_name = server_name_bytes.decode("utf-8")
media_id = media_id_bytes.decode("utf8")
+ # Validate the server name, raising if invalid
+ parse_and_validate_server_name(server_name)
+
file_name = None
if len(postpath) > 2:
try:
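The fix routes the server name extracted from the request path through parse_and_validate_server_name before it can influence any media-store lookup, so malformed or path-traversal style values are rejected up front. A rough usage sketch, assuming the helper raises ValueError on malformed names (the surrounding try/except in parse_media_id converts any failure into a 404 SynapseError):

from synapse.util.stringutils import parse_and_validate_server_name

def is_acceptable_server_name(server_name: str) -> bool:
    try:
        parse_and_validate_server_name(server_name)
        return True
    except ValueError:
        return False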
|
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import urllib
from types import TracebackType
from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
import attr
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.util.stringutils import is_ascii
logger = logging.getLogger(__name__)
# list all text content types that will have the charset default to UTF-8 when
# none is given
TEXT_CONTENT_TYPES = [
"text/css",
"text/csv",
"text/html",
"text/calendar",
"text/plain",
"text/javascript",
"application/json",
"application/ld+json",
"application/rtf",
"image/svg+xml",
"text/xml",
]
def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
try:
# The type on postpath seems incorrect in Twisted 21.2.0.
postpath: List[bytes] = request.postpath # type: ignore
assert postpath
# This allows users to append e.g. /test.png to the URL. Useful for
# clients that parse the URL to see content type.
server_name_bytes, media_id_bytes = postpath[:2]
server_name = server_name_bytes.decode("utf-8")
media_id = media_id_bytes.decode("utf8")
file_name = None
if len(postpath) > 2:
try:
file_name = urllib.parse.unquote(postpath[-1].decode("utf-8"))
except UnicodeDecodeError:
pass
return server_name, media_id, file_name
except Exception:
raise SynapseError(
404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN
)
def respond_404(request: SynapseRequest) -> None:
respond_with_json(
request,
404,
cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND),
send_cors=True,
)
async def respond_with_file(
request: SynapseRequest,
media_type: str,
file_path: str,
file_size: Optional[int] = None,
upload_name: Optional[str] = None,
) -> None:
logger.debug("Responding with %r", file_path)
if os.path.isfile(file_path):
if file_size is None:
stat = os.stat(file_path)
file_size = stat.st_size
add_file_headers(request, media_type, file_size, upload_name)
with open(file_path, "rb") as f:
await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))
finish_request(request)
else:
respond_404(request)
def add_file_headers(
request: Request,
media_type: str,
file_size: Optional[int],
upload_name: Optional[str],
) -> None:
"""Adds the correct response headers in preparation for responding with the
media.
Args:
request
media_type: The media/content type.
file_size: Size in bytes of the media, if known.
upload_name: The name of the requested file, if any.
"""
def _quote(x: str) -> str:
return urllib.parse.quote(x.encode("utf-8"))
# Default to a UTF-8 charset for text content types.
# ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16'
if media_type.lower() in TEXT_CONTENT_TYPES:
content_type = media_type + "; charset=UTF-8"
else:
content_type = media_type
request.setHeader(b"Content-Type", content_type.encode("UTF-8"))
if upload_name:
# RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
#
# `filename` is defined to be a `value`, which is defined by RFC2616
# section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
# is (essentially) a single US-ASCII word, and a `quoted-string` is a
# US-ASCII string surrounded by double-quotes, using backslash as an
# escape character. Note that %-encoding is *not* permitted.
#
# `filename*` is defined to be an `ext-value`, which is defined in
# RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
# where `value-chars` is essentially a %-encoded string in the given charset.
#
# [1]: https://tools.ietf.org/html/rfc6266#section-4.1
# [2]: https://tools.ietf.org/html/rfc2616#section-3.6
# [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1
# We avoid the quoted-string version of `filename`, because (a) synapse didn't
# correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
# may as well just do the filename* version.
if _can_encode_filename_as_token(upload_name):
disposition = "inline; filename=%s" % (upload_name,)
else:
disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),)
request.setHeader(b"Content-Disposition", disposition.encode("ascii"))
# cache for at least a day.
# XXX: we might want to turn this off for data we don't want to
# recommend caching as it's sensitive or private - or at least
# select private. don't bother setting Expires as all our
# clients are smart enough to be happy with Cache-Control
request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
if file_size is not None:
request.setHeader(b"Content-Length", b"%d" % (file_size,))
# Tell web crawlers to not index, archive, or follow links in media. This
# should help to prevent things in the media repo from showing up in web
# search results.
request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex")
# separators as defined in RFC2616. SP and HT are handled separately.
# see _can_encode_filename_as_token.
_FILENAME_SEPARATOR_CHARS = {
"(",
")",
"<",
">",
"@",
",",
";",
":",
"\\",
'"',
"/",
"[",
"]",
"?",
"=",
"{",
"}",
}
def _can_encode_filename_as_token(x: str) -> bool:
for c in x:
# from RFC2616:
#
# token = 1*<any CHAR except CTLs or separators>
#
# separators = "(" | ")" | "<" | ">" | "@"
# | "," | ";" | ":" | "\" | <">
# | "/" | "[" | "]" | "?" | "="
# | "{" | "}" | SP | HT
#
# CHAR = <any US-ASCII character (octets 0 - 127)>
#
# CTL = <any US-ASCII control character
# (octets 0 - 31) and DEL (127)>
#
if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS:
return False
return True
async def respond_with_responder(
request: SynapseRequest,
responder: "Optional[Responder]",
media_type: str,
file_size: Optional[int],
upload_name: Optional[str] = None,
) -> None:
"""Responds to the request with given responder. If responder is None then
returns 404.
Args:
request
responder
media_type: The media/content type.
file_size: Size in bytes of the media. If not known it should be None
upload_name: The name of the requested file, if any.
"""
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
if not responder:
respond_404(request)
return
logger.debug("Responding to media request with responder %s", responder)
add_file_headers(request, media_type, file_size, upload_name)
try:
with responder:
await responder.write_to_consumer(request)
except Exception as e:
# The majority of the time this will be due to the client having gone
# away. Unfortunately, Twisted simply throws a generic exception at us
# in that case.
logger.warning("Failed to write to consumer: %s %s", type(e), e)
# Unregister the producer, if it has one, so Twisted doesn't complain
if request.producer:
request.unregisterProducer()
finish_request(request)
class Responder:
"""Represents a response that can be streamed to the requester.
Responder is a context manager which *must* be used, so that any resources
held can be cleaned up.
"""
def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
"""Stream response into consumer
Args:
consumer: The consumer to stream into.
Returns:
Resolves once the response has finished being written
"""
pass
def __enter__(self) -> None:
pass
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
pass
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ThumbnailInfo:
"""Details about a generated thumbnail."""
width: int
height: int
method: str
# Content type of thumbnail, e.g. image/png
type: str
# The size of the media file, in bytes.
length: Optional[int] = None
@attr.s(slots=True, frozen=True, auto_attribs=True)
class FileInfo:
"""Details about a requested/uploaded file."""
# The server name where the media originated from, or None if local.
server_name: Optional[str]
# The local ID of the file. For local files this is the same as the media_id
file_id: str
# If the file is for the url preview cache
url_cache: bool = False
# Whether the file is a thumbnail or not.
thumbnail: Optional[ThumbnailInfo] = None
# The below properties exist to maintain compatibility with third-party modules.
@property
def thumbnail_width(self) -> Optional[int]:
if not self.thumbnail:
return None
return self.thumbnail.width
@property
def thumbnail_height(self) -> Optional[int]:
if not self.thumbnail:
return None
return self.thumbnail.height
@property
def thumbnail_method(self) -> Optional[str]:
if not self.thumbnail:
return None
return self.thumbnail.method
@property
def thumbnail_type(self) -> Optional[str]:
if not self.thumbnail:
return None
return self.thumbnail.type
@property
def thumbnail_length(self) -> Optional[int]:
if not self.thumbnail:
return None
return self.thumbnail.length
def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]:
"""
Get the filename of the downloaded file by inspecting the
Content-Disposition HTTP header.
Args:
headers: The HTTP request headers.
Returns:
The filename, or None.
"""
content_disposition = headers.get(b"Content-Disposition", [b""])
# No header, bail out.
if not content_disposition[0]:
return None
_, params = _parse_header(content_disposition[0])
upload_name = None
# First check if there is a valid UTF-8 filename
upload_name_utf8 = params.get(b"filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith(b"utf-8''"):
upload_name_utf8 = upload_name_utf8[7:]
# We have a filename*= section. This MUST be ASCII, and any UTF-8
# bytes are %-quoted.
try:
# Once it is decoded, we can then unquote the %-encoded
# parts strictly into a unicode string.
upload_name = urllib.parse.unquote(
upload_name_utf8.decode("ascii"), errors="strict"
)
except UnicodeDecodeError:
# Incorrect UTF-8.
pass
    # If there isn't, check for an ASCII name.
if not upload_name:
upload_name_ascii = params.get(b"filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
upload_name = upload_name_ascii.decode("ascii")
# This may be None here, indicating we did not find a matching name.
return upload_name
def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]:
"""Parse a Content-type like header.
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
line: header to be parsed
Returns:
The main content-type, followed by the parameter dictionary
"""
parts = _parseparam(b";" + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find(b"=")
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1 :].strip()
# strip double-quotes
if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
def _parseparam(s: bytes) -> Generator[bytes, None, None]:
"""Generator which splits the input on ;, respecting double-quoted sequences
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
s: header to be parsed
Returns:
The split input
"""
while s[:1] == b";":
s = s[1:]
# look for the next ;
end = s.find(b";")
# if there is an odd number of " marks between here and the next ;, skip to the
# next ; instead
while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
end = s.find(b";", end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
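# An illustrative sketch (not part of the module above) of how _parse_header
# splits a Content-Disposition value into its main token and its parameters,
# which is what get_filename_from_headers builds on:
def _parse_header_sketch() -> None:
    header = b'inline; filename="foo.png"; filename*=utf-8\'\'b%C3%A4r.png'
    key, params = _parse_header(header)
    # key == b"inline"
    # params[b"filename"] == b"foo.png" (surrounding double-quotes are stripped)
    # params[b"filename*"] == b"utf-8''b%C3%A4r.png"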
|
GHSA-3hfw-x7gx-437c
|
synapse/rest/media/v1/filepath.py
|
@@ -16,7 +16,8 @@
import functools
import os
import re
-from typing import Any, Callable, List, TypeVar, cast
+import string
+from typing import Any, Callable, List, TypeVar, Union, cast
NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d")
@@ -37,6 +38,85 @@ def _wrapped(self: "MediaFilePaths", *args: Any, **kwargs: Any) -> str:
return cast(F, _wrapped)
+GetPathMethod = TypeVar(
+ "GetPathMethod", bound=Union[Callable[..., str], Callable[..., List[str]]]
+)
+
+
+def _wrap_with_jail_check(func: GetPathMethod) -> GetPathMethod:
+ """Wraps a path-returning method to check that the returned path(s) do not escape
+ the media store directory.
+
+ The check is not expected to ever fail, unless `func` is missing a call to
+ `_validate_path_component`, or `_validate_path_component` is buggy.
+
+ Args:
+ func: The `MediaFilePaths` method to wrap. The method may return either a single
+ path, or a list of paths. Returned paths may be either absolute or relative.
+
+ Returns:
+ The method, wrapped with a check to ensure that the returned path(s) lie within
+ the media store directory. Raises a `ValueError` if the check fails.
+ """
+
+ @functools.wraps(func)
+ def _wrapped(
+ self: "MediaFilePaths", *args: Any, **kwargs: Any
+ ) -> Union[str, List[str]]:
+ path_or_paths = func(self, *args, **kwargs)
+
+ if isinstance(path_or_paths, list):
+ paths_to_check = path_or_paths
+ else:
+ paths_to_check = [path_or_paths]
+
+ for path in paths_to_check:
+ # path may be an absolute or relative path, depending on the method being
+ # wrapped. When "appending" an absolute path, `os.path.join` discards the
+ # previous path, which is desired here.
+ normalized_path = os.path.normpath(os.path.join(self.real_base_path, path))
+ if (
+ os.path.commonpath([normalized_path, self.real_base_path])
+ != self.real_base_path
+ ):
+ raise ValueError(f"Invalid media store path: {path!r}")
+
+ return path_or_paths
+
+ return cast(GetPathMethod, _wrapped)
+
+
+ALLOWED_CHARACTERS = set(
+ string.ascii_letters
+ + string.digits
+ + "_-"
+ + ".[]:" # Domain names, IPv6 addresses and ports in server names
+)
+FORBIDDEN_NAMES = {
+ "",
+ os.path.curdir, # "." for the current platform
+ os.path.pardir, # ".." for the current platform
+}
+
+
+def _validate_path_component(name: str) -> str:
+ """Checks that the given string can be safely used as a path component
+
+ Args:
+ name: The path component to check.
+
+ Returns:
+ The path component if valid.
+
+ Raises:
+ ValueError: If `name` cannot be safely used as a path component.
+ """
+ if not ALLOWED_CHARACTERS.issuperset(name) or name in FORBIDDEN_NAMES:
+ raise ValueError(f"Invalid path component: {name!r}")
+
+ return name
+
+
class MediaFilePaths:
"""Describes where files are stored on disk.
@@ -48,22 +128,46 @@ class MediaFilePaths:
def __init__(self, primary_base_path: str):
self.base_path = primary_base_path
+ # The media store directory, with all symlinks resolved.
+ self.real_base_path = os.path.realpath(primary_base_path)
+
+ # Refuse to initialize if paths cannot be validated correctly for the current
+ # platform.
+ assert os.path.sep not in ALLOWED_CHARACTERS
+ assert os.path.altsep not in ALLOWED_CHARACTERS
+ # On Windows, paths have all sorts of weirdness which `_validate_path_component`
+ # does not consider. In any case, the remote media store can't work correctly
+ # for certain homeservers there, since ":"s aren't allowed in paths.
+ assert os.name == "posix"
+
+ @_wrap_with_jail_check
def local_media_filepath_rel(self, media_id: str) -> str:
- return os.path.join("local_content", media_id[0:2], media_id[2:4], media_id[4:])
+ return os.path.join(
+ "local_content",
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ _validate_path_component(media_id[4:]),
+ )
local_media_filepath = _wrap_in_base_path(local_media_filepath_rel)
+ @_wrap_with_jail_check
def local_media_thumbnail_rel(
self, media_id: str, width: int, height: int, content_type: str, method: str
) -> str:
top_level_type, sub_type = content_type.split("/")
file_name = "%i-%i-%s-%s-%s" % (width, height, top_level_type, sub_type, method)
return os.path.join(
- "local_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], file_name
+ "local_thumbnails",
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ _validate_path_component(media_id[4:]),
+ _validate_path_component(file_name),
)
local_media_thumbnail = _wrap_in_base_path(local_media_thumbnail_rel)
+ @_wrap_with_jail_check
def local_media_thumbnail_dir(self, media_id: str) -> str:
"""
Retrieve the local store path of thumbnails of a given media_id
@@ -76,18 +180,24 @@ def local_media_thumbnail_dir(self, media_id: str) -> str:
return os.path.join(
self.base_path,
"local_thumbnails",
- media_id[0:2],
- media_id[2:4],
- media_id[4:],
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ _validate_path_component(media_id[4:]),
)
+ @_wrap_with_jail_check
def remote_media_filepath_rel(self, server_name: str, file_id: str) -> str:
return os.path.join(
- "remote_content", server_name, file_id[0:2], file_id[2:4], file_id[4:]
+ "remote_content",
+ _validate_path_component(server_name),
+ _validate_path_component(file_id[0:2]),
+ _validate_path_component(file_id[2:4]),
+ _validate_path_component(file_id[4:]),
)
remote_media_filepath = _wrap_in_base_path(remote_media_filepath_rel)
+ @_wrap_with_jail_check
def remote_media_thumbnail_rel(
self,
server_name: str,
@@ -101,62 +211,86 @@ def remote_media_thumbnail_rel(
file_name = "%i-%i-%s-%s-%s" % (width, height, top_level_type, sub_type, method)
return os.path.join(
"remote_thumbnail",
- server_name,
- file_id[0:2],
- file_id[2:4],
- file_id[4:],
- file_name,
+ _validate_path_component(server_name),
+ _validate_path_component(file_id[0:2]),
+ _validate_path_component(file_id[2:4]),
+ _validate_path_component(file_id[4:]),
+ _validate_path_component(file_name),
)
remote_media_thumbnail = _wrap_in_base_path(remote_media_thumbnail_rel)
# Legacy path that was used to store thumbnails previously.
# Should be removed after some time, when most of the thumbnails are stored
# using the new path.
+ @_wrap_with_jail_check
def remote_media_thumbnail_rel_legacy(
self, server_name: str, file_id: str, width: int, height: int, content_type: str
) -> str:
top_level_type, sub_type = content_type.split("/")
file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type)
return os.path.join(
"remote_thumbnail",
- server_name,
- file_id[0:2],
- file_id[2:4],
- file_id[4:],
- file_name,
+ _validate_path_component(server_name),
+ _validate_path_component(file_id[0:2]),
+ _validate_path_component(file_id[2:4]),
+ _validate_path_component(file_id[4:]),
+ _validate_path_component(file_name),
)
def remote_media_thumbnail_dir(self, server_name: str, file_id: str) -> str:
return os.path.join(
self.base_path,
"remote_thumbnail",
- server_name,
- file_id[0:2],
- file_id[2:4],
- file_id[4:],
+ _validate_path_component(server_name),
+ _validate_path_component(file_id[0:2]),
+ _validate_path_component(file_id[2:4]),
+ _validate_path_component(file_id[4:]),
)
+ @_wrap_with_jail_check
def url_cache_filepath_rel(self, media_id: str) -> str:
if NEW_FORMAT_ID_RE.match(media_id):
# Media id is of the form <DATE><RANDOM_STRING>
# E.g.: 2017-09-28-fsdRDt24DS234dsf
- return os.path.join("url_cache", media_id[:10], media_id[11:])
+ return os.path.join(
+ "url_cache",
+ _validate_path_component(media_id[:10]),
+ _validate_path_component(media_id[11:]),
+ )
else:
- return os.path.join("url_cache", media_id[0:2], media_id[2:4], media_id[4:])
+ return os.path.join(
+ "url_cache",
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ _validate_path_component(media_id[4:]),
+ )
url_cache_filepath = _wrap_in_base_path(url_cache_filepath_rel)
+ @_wrap_with_jail_check
def url_cache_filepath_dirs_to_delete(self, media_id: str) -> List[str]:
"The dirs to try and remove if we delete the media_id file"
if NEW_FORMAT_ID_RE.match(media_id):
- return [os.path.join(self.base_path, "url_cache", media_id[:10])]
+ return [
+ os.path.join(
+ self.base_path, "url_cache", _validate_path_component(media_id[:10])
+ )
+ ]
else:
return [
- os.path.join(self.base_path, "url_cache", media_id[0:2], media_id[2:4]),
- os.path.join(self.base_path, "url_cache", media_id[0:2]),
+ os.path.join(
+ self.base_path,
+ "url_cache",
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ ),
+ os.path.join(
+ self.base_path, "url_cache", _validate_path_component(media_id[0:2])
+ ),
]
+ @_wrap_with_jail_check
def url_cache_thumbnail_rel(
self, media_id: str, width: int, height: int, content_type: str, method: str
) -> str:
@@ -168,59 +302,82 @@ def url_cache_thumbnail_rel(
if NEW_FORMAT_ID_RE.match(media_id):
return os.path.join(
- "url_cache_thumbnails", media_id[:10], media_id[11:], file_name
+ "url_cache_thumbnails",
+ _validate_path_component(media_id[:10]),
+ _validate_path_component(media_id[11:]),
+ _validate_path_component(file_name),
)
else:
return os.path.join(
"url_cache_thumbnails",
- media_id[0:2],
- media_id[2:4],
- media_id[4:],
- file_name,
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ _validate_path_component(media_id[4:]),
+ _validate_path_component(file_name),
)
url_cache_thumbnail = _wrap_in_base_path(url_cache_thumbnail_rel)
+ @_wrap_with_jail_check
def url_cache_thumbnail_directory_rel(self, media_id: str) -> str:
# Media id is of the form <DATE><RANDOM_STRING>
# E.g.: 2017-09-28-fsdRDt24DS234dsf
if NEW_FORMAT_ID_RE.match(media_id):
- return os.path.join("url_cache_thumbnails", media_id[:10], media_id[11:])
+ return os.path.join(
+ "url_cache_thumbnails",
+ _validate_path_component(media_id[:10]),
+ _validate_path_component(media_id[11:]),
+ )
else:
return os.path.join(
"url_cache_thumbnails",
- media_id[0:2],
- media_id[2:4],
- media_id[4:],
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ _validate_path_component(media_id[4:]),
)
url_cache_thumbnail_directory = _wrap_in_base_path(
url_cache_thumbnail_directory_rel
)
+ @_wrap_with_jail_check
def url_cache_thumbnail_dirs_to_delete(self, media_id: str) -> List[str]:
"The dirs to try and remove if we delete the media_id thumbnails"
# Media id is of the form <DATE><RANDOM_STRING>
# E.g.: 2017-09-28-fsdRDt24DS234dsf
if NEW_FORMAT_ID_RE.match(media_id):
return [
os.path.join(
- self.base_path, "url_cache_thumbnails", media_id[:10], media_id[11:]
+ self.base_path,
+ "url_cache_thumbnails",
+ _validate_path_component(media_id[:10]),
+ _validate_path_component(media_id[11:]),
+ ),
+ os.path.join(
+ self.base_path,
+ "url_cache_thumbnails",
+ _validate_path_component(media_id[:10]),
),
- os.path.join(self.base_path, "url_cache_thumbnails", media_id[:10]),
]
else:
return [
os.path.join(
self.base_path,
"url_cache_thumbnails",
- media_id[0:2],
- media_id[2:4],
- media_id[4:],
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ _validate_path_component(media_id[4:]),
),
os.path.join(
- self.base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4]
+ self.base_path,
+ "url_cache_thumbnails",
+ _validate_path_component(media_id[0:2]),
+ _validate_path_component(media_id[2:4]),
+ ),
+ os.path.join(
+ self.base_path,
+ "url_cache_thumbnails",
+ _validate_path_component(media_id[0:2]),
),
- os.path.join(self.base_path, "url_cache_thumbnails", media_id[0:2]),
]
|
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2020-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import re
from typing import Any, Callable, List, TypeVar, cast
NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d")
F = TypeVar("F", bound=Callable[..., str])
def _wrap_in_base_path(func: F) -> F:
"""Takes a function that returns a relative path and turns it into an
absolute path based on the location of the primary media store
"""
@functools.wraps(func)
def _wrapped(self: "MediaFilePaths", *args: Any, **kwargs: Any) -> str:
path = func(self, *args, **kwargs)
return os.path.join(self.base_path, path)
return cast(F, _wrapped)
class MediaFilePaths:
"""Describes where files are stored on disk.
Most of the functions have a `*_rel` variant which returns a file path that
is relative to the base media store path. This is mainly used when we want
to write to the backup media store (when one is configured)
"""
def __init__(self, primary_base_path: str):
self.base_path = primary_base_path
def local_media_filepath_rel(self, media_id: str) -> str:
return os.path.join("local_content", media_id[0:2], media_id[2:4], media_id[4:])
local_media_filepath = _wrap_in_base_path(local_media_filepath_rel)
def local_media_thumbnail_rel(
self, media_id: str, width: int, height: int, content_type: str, method: str
) -> str:
top_level_type, sub_type = content_type.split("/")
file_name = "%i-%i-%s-%s-%s" % (width, height, top_level_type, sub_type, method)
return os.path.join(
"local_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], file_name
)
local_media_thumbnail = _wrap_in_base_path(local_media_thumbnail_rel)
def local_media_thumbnail_dir(self, media_id: str) -> str:
"""
Retrieve the local store path of thumbnails of a given media_id
Args:
media_id: The media ID to query.
Returns:
Path of local_thumbnails from media_id
"""
return os.path.join(
self.base_path,
"local_thumbnails",
media_id[0:2],
media_id[2:4],
media_id[4:],
)
def remote_media_filepath_rel(self, server_name: str, file_id: str) -> str:
return os.path.join(
"remote_content", server_name, file_id[0:2], file_id[2:4], file_id[4:]
)
remote_media_filepath = _wrap_in_base_path(remote_media_filepath_rel)
def remote_media_thumbnail_rel(
self,
server_name: str,
file_id: str,
width: int,
height: int,
content_type: str,
method: str,
) -> str:
top_level_type, sub_type = content_type.split("/")
file_name = "%i-%i-%s-%s-%s" % (width, height, top_level_type, sub_type, method)
return os.path.join(
"remote_thumbnail",
server_name,
file_id[0:2],
file_id[2:4],
file_id[4:],
file_name,
)
remote_media_thumbnail = _wrap_in_base_path(remote_media_thumbnail_rel)
# Legacy path that was used to store thumbnails previously.
# Should be removed after some time, when most of the thumbnails are stored
# using the new path.
def remote_media_thumbnail_rel_legacy(
self, server_name: str, file_id: str, width: int, height: int, content_type: str
) -> str:
top_level_type, sub_type = content_type.split("/")
file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type)
return os.path.join(
"remote_thumbnail",
server_name,
file_id[0:2],
file_id[2:4],
file_id[4:],
file_name,
)
def remote_media_thumbnail_dir(self, server_name: str, file_id: str) -> str:
return os.path.join(
self.base_path,
"remote_thumbnail",
server_name,
file_id[0:2],
file_id[2:4],
file_id[4:],
)
def url_cache_filepath_rel(self, media_id: str) -> str:
if NEW_FORMAT_ID_RE.match(media_id):
# Media id is of the form <DATE><RANDOM_STRING>
# E.g.: 2017-09-28-fsdRDt24DS234dsf
return os.path.join("url_cache", media_id[:10], media_id[11:])
else:
return os.path.join("url_cache", media_id[0:2], media_id[2:4], media_id[4:])
url_cache_filepath = _wrap_in_base_path(url_cache_filepath_rel)
def url_cache_filepath_dirs_to_delete(self, media_id: str) -> List[str]:
"The dirs to try and remove if we delete the media_id file"
if NEW_FORMAT_ID_RE.match(media_id):
return [os.path.join(self.base_path, "url_cache", media_id[:10])]
else:
return [
os.path.join(self.base_path, "url_cache", media_id[0:2], media_id[2:4]),
os.path.join(self.base_path, "url_cache", media_id[0:2]),
]
def url_cache_thumbnail_rel(
self, media_id: str, width: int, height: int, content_type: str, method: str
) -> str:
# Media id is of the form <DATE><RANDOM_STRING>
# E.g.: 2017-09-28-fsdRDt24DS234dsf
top_level_type, sub_type = content_type.split("/")
file_name = "%i-%i-%s-%s-%s" % (width, height, top_level_type, sub_type, method)
if NEW_FORMAT_ID_RE.match(media_id):
return os.path.join(
"url_cache_thumbnails", media_id[:10], media_id[11:], file_name
)
else:
return os.path.join(
"url_cache_thumbnails",
media_id[0:2],
media_id[2:4],
media_id[4:],
file_name,
)
url_cache_thumbnail = _wrap_in_base_path(url_cache_thumbnail_rel)
def url_cache_thumbnail_directory_rel(self, media_id: str) -> str:
# Media id is of the form <DATE><RANDOM_STRING>
# E.g.: 2017-09-28-fsdRDt24DS234dsf
if NEW_FORMAT_ID_RE.match(media_id):
return os.path.join("url_cache_thumbnails", media_id[:10], media_id[11:])
else:
return os.path.join(
"url_cache_thumbnails",
media_id[0:2],
media_id[2:4],
media_id[4:],
)
url_cache_thumbnail_directory = _wrap_in_base_path(
url_cache_thumbnail_directory_rel
)
def url_cache_thumbnail_dirs_to_delete(self, media_id: str) -> List[str]:
"The dirs to try and remove if we delete the media_id thumbnails"
# Media id is of the form <DATE><RANDOM_STRING>
# E.g.: 2017-09-28-fsdRDt24DS234dsf
if NEW_FORMAT_ID_RE.match(media_id):
return [
os.path.join(
self.base_path, "url_cache_thumbnails", media_id[:10], media_id[11:]
),
os.path.join(self.base_path, "url_cache_thumbnails", media_id[:10]),
]
else:
return [
os.path.join(
self.base_path,
"url_cache_thumbnails",
media_id[0:2],
media_id[2:4],
media_id[4:],
),
os.path.join(
self.base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4]
),
os.path.join(self.base_path, "url_cache_thumbnails", media_id[0:2]),
]
|
GHSA-3hfw-x7gx-437c
|
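The filepath.py patch above routes every request-derived path component through `_validate_path_component` and decorates the path-building methods with `_wrap_with_jail_check`, but neither helper's definition is included in this excerpt. Below is a minimal sketch of what such helpers plausibly look like, consistent with the invalid values exercised in tests/rest/media/v1/test_filepath.py further down; the names come from the patch, while the exact rules and regex here are assumptions.

import functools
import os.path
import re
from typing import Any, Callable, List, TypeVar, Union, cast

F = TypeVar("F", bound=Callable[..., Union[str, List[str]]])

# Assumption: a component may not be empty, ".", "..", or contain separators/NUL.
_ALLOWED_COMPONENT = re.compile(r"[^/\\\x00]+\Z")


def _validate_path_component(name: str) -> str:
    """Reject path components that could be used to escape the media store."""
    if name in (".", "..") or not _ALLOWED_COMPONENT.match(name):
        raise ValueError("Invalid path component: %r" % (name,))
    return name


def _wrap_with_jail_check(func: F) -> F:
    """Re-check that every path the wrapped method returns stays under base_path."""

    @functools.wraps(func)
    def _wrapped(self: Any, *args: Any, **kwargs: Any) -> Union[str, List[str]]:
        result = func(self, *args, **kwargs)
        paths = result if isinstance(result, list) else [result]
        jail = os.path.join(self.base_path, "")
        for path in paths:
            normalized = os.path.normpath(os.path.join(self.base_path, path))
            if not normalized.startswith(jail):
                raise ValueError("Path %r escapes the media store" % (path,))
        return result

    return cast(F, _wrapped)

Relative `*_rel` results are joined onto `self.base_path` before normalisation, while absolute results pass through `os.path.join` unchanged, so the same check covers both kinds of return value.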
synapse/util/stringutils.py
|
@@ -19,6 +19,8 @@
from collections.abc import Iterable
from typing import Optional, Tuple
+from netaddr import valid_ipv6
+
from synapse.api.errors import Codes, SynapseError
_string_with_symbols = string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
@@ -97,7 +99,10 @@ def parse_server_name(server_name: str) -> Tuple[str, Optional[int]]:
raise ValueError("Invalid server name '%s'" % server_name)
-VALID_HOST_REGEX = re.compile("\\A[0-9a-zA-Z.-]+\\Z")
+# An approximation of the domain name syntax in RFC 1035, section 2.3.1.
+# NB: "\Z" is not equivalent to "$".
+# The latter will match the position before a "\n" at the end of a string.
+VALID_HOST_REGEX = re.compile("\\A[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*\\Z")
def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int]]:
@@ -122,13 +127,15 @@ def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int]
if host[0] == "[":
if host[-1] != "]":
raise ValueError("Mismatched [...] in server name '%s'" % (server_name,))
- return host, port
- # otherwise it should only be alphanumerics.
- if not VALID_HOST_REGEX.match(host):
- raise ValueError(
- "Server name '%s' contains invalid characters" % (server_name,)
- )
+ # valid_ipv6 raises when given an empty string
+ ipv6_address = host[1:-1]
+ if not ipv6_address or not valid_ipv6(ipv6_address):
+ raise ValueError(
+ "Server name '%s' is not a valid IPv6 address" % (server_name,)
+ )
+ elif not VALID_HOST_REGEX.match(host):
+ raise ValueError("Server name '%s' has an invalid format" % (server_name,))
return host, port
|
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import re
import secrets
import string
from collections.abc import Iterable
from typing import Optional, Tuple
from synapse.api.errors import Codes, SynapseError
_string_with_symbols = string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
# https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-register-email-requesttoken
CLIENT_SECRET_REGEX = re.compile(r"^[0-9a-zA-Z\.=_\-]+$")
# https://matrix.org/docs/spec/client_server/r0.6.1#matrix-content-mxc-uris,
# together with https://github.com/matrix-org/matrix-doc/issues/2177 which basically
# says "there is no grammar for media ids"
#
# The server_name part of this is purposely lax: use parse_and_validate_mxc for
# additional validation.
#
MXC_REGEX = re.compile("^mxc://([^/]+)/([^/#?]+)$")
def random_string(length: int) -> str:
"""Generate a cryptographically secure string of random letters.
Drawn from the characters: `a-z` and `A-Z`
"""
return "".join(secrets.choice(string.ascii_letters) for _ in range(length))
def random_string_with_symbols(length: int) -> str:
"""Generate a cryptographically secure string of random letters/numbers/symbols.
Drawn from the characters: `a-z`, `A-Z`, `0-9`, and `.,;:^&*-_+=#~@`
"""
return "".join(secrets.choice(_string_with_symbols) for _ in range(length))
def is_ascii(s: bytes) -> bool:
try:
s.decode("ascii").encode("ascii")
except UnicodeError:
return False
return True
def assert_valid_client_secret(client_secret: str) -> None:
"""Validate that a given string matches the client_secret defined by the spec"""
if (
len(client_secret) <= 0
or len(client_secret) > 255
or CLIENT_SECRET_REGEX.match(client_secret) is None
):
raise SynapseError(
400, "Invalid client_secret parameter", errcode=Codes.INVALID_PARAM
)
def parse_server_name(server_name: str) -> Tuple[str, Optional[int]]:
"""Split a server name into host/port parts.
Args:
server_name: server name to parse
Returns:
host/port parts.
Raises:
ValueError if the server name could not be parsed.
"""
try:
if server_name[-1] == "]":
# ipv6 literal, hopefully
return server_name, None
domain_port = server_name.rsplit(":", 1)
domain = domain_port[0]
port = int(domain_port[1]) if domain_port[1:] else None
return domain, port
except Exception:
raise ValueError("Invalid server name '%s'" % server_name)
VALID_HOST_REGEX = re.compile("\\A[0-9a-zA-Z.-]+\\Z")
def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int]]:
"""Split a server name into host/port parts and do some basic validation.
Args:
server_name: server name to parse
Returns:
host/port parts.
Raises:
ValueError if the server name could not be parsed.
"""
host, port = parse_server_name(server_name)
# these tests don't need to be bulletproof as we'll find out soon enough
# if somebody is giving us invalid data. What we *do* need is to be sure
# that nobody is sneaking IP literals in that look like hostnames, etc.
# look for ipv6 literals
if host[0] == "[":
if host[-1] != "]":
raise ValueError("Mismatched [...] in server name '%s'" % (server_name,))
return host, port
# otherwise it should only be alphanumerics.
if not VALID_HOST_REGEX.match(host):
raise ValueError(
"Server name '%s' contains invalid characters" % (server_name,)
)
return host, port
def valid_id_server_location(id_server: str) -> bool:
"""Check whether an identity server location, such as the one passed as the
`id_server` parameter to `/_matrix/client/r0/account/3pid/bind`, is valid.
A valid identity server location consists of a valid hostname and optional
port number, optionally followed by any number of `/` delimited path
components, without any fragment or query string parts.
Args:
id_server: identity server location string to validate
Returns:
True if valid, False otherwise.
"""
components = id_server.split("/", 1)
host = components[0]
try:
parse_and_validate_server_name(host)
except ValueError:
return False
if len(components) < 2:
# no path
return True
path = components[1]
return "#" not in path and "?" not in path
def parse_and_validate_mxc_uri(mxc: str) -> Tuple[str, Optional[int], str]:
"""Parse the given string as an MXC URI
Checks that the "server name" part is a valid server name
Args:
mxc: the (alleged) MXC URI to be checked
Returns:
hostname, port, media id
Raises:
ValueError if the URI cannot be parsed
"""
m = MXC_REGEX.match(mxc)
if not m:
raise ValueError("mxc URI %r did not match expected format" % (mxc,))
server_name = m.group(1)
media_id = m.group(2)
host, port = parse_and_validate_server_name(server_name)
return host, port, media_id
def shortstr(iterable: Iterable, maxitems: int = 5) -> str:
"""If iterable has maxitems or fewer, return the stringification of a list
containing those items.
Otherwise, return the stringification of a list with the first maxitems items,
followed by "...".
Args:
iterable: iterable to truncate
maxitems: number of items to return before truncating
"""
items = list(itertools.islice(iterable, maxitems + 1))
if len(items) <= maxitems:
return str(items)
return "[" + ", ".join(repr(r) for r in items[:maxitems]) + ", ...]"
def strtobool(val: str) -> bool:
"""Convert a string representation of truth to True or False
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
This is lifted from distutils.util.strtobool, with the exception that it actually
returns a bool, rather than an int.
"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return True
elif val in ("n", "no", "f", "false", "off", "0"):
return False
else:
raise ValueError("invalid truth value %r" % (val,))
_BASE62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
def base62_encode(num: int, minwidth: int = 1) -> str:
"""Encode a number using base62
Args:
num: number to be encoded
minwidth: width to pad to, if the number is small
"""
res = ""
while num:
num, rem = divmod(num, 62)
res = _BASE62[rem] + res
# pad to minimum width
pad = "0" * (minwidth - len(res))
return pad + res
|
GHSA-3hfw-x7gx-437c
|
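The stringutils.py change above tightens VALID_HOST_REGEX so that every dot-separated label must contain at least one character, and its comment stresses that `\Z` anchors at the true end of the string while `$` also matches just before a trailing newline. A short, standalone demonstration of both points with Python's `re` (the pattern is copied from the patch; the sample hostnames are illustrative):

import re

# "$" matches before a trailing newline; "\Z" matches only at the true end of the string.
assert re.search(r"example\.com$", "example.com\n") is not None
assert re.search(r"example\.com\Z", "example.com\n") is None

VALID_HOST_REGEX = re.compile("\\A[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*\\Z")
assert VALID_HOST_REGEX.match("matrix-federation.matrix.org")
assert VALID_HOST_REGEX.match(".empty-label.com") is None   # empty leading label
assert VALID_HOST_REGEX.match("newline.com\n") is None       # trailing newline
assert VALID_HOST_REGEX.match("percent%65.com") is None      # character outside the allowed set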
tests/http/test_endpoint.py
|
@@ -36,8 +36,11 @@ def test_validate_bad_server_names(self):
"localhost:http", # non-numeric port
"1234]", # smells like ipv6 literal but isn't
"[1234",
+ "[1.2.3.4]",
"underscore_.com",
"percent%65.com",
+ "newline.com\n",
+ ".empty-label.com",
"1234:5678:80", # too many colons
]
for i in test_data:
|
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util.stringutils import parse_and_validate_server_name, parse_server_name
from tests import unittest
class ServerNameTestCase(unittest.TestCase):
def test_parse_server_name(self):
test_data = {
"localhost": ("localhost", None),
"my-example.com:1234": ("my-example.com", 1234),
"1.2.3.4": ("1.2.3.4", None),
"[0abc:1def::1234]": ("[0abc:1def::1234]", None),
"1.2.3.4:1": ("1.2.3.4", 1),
"[0abc:1def::1234]:8080": ("[0abc:1def::1234]", 8080),
}
for i, o in test_data.items():
self.assertEqual(parse_server_name(i), o)
def test_validate_bad_server_names(self):
test_data = [
"", # empty
"localhost:http", # non-numeric port
"1234]", # smells like ipv6 literal but isn't
"[1234",
"underscore_.com",
"percent%65.com",
"1234:5678:80", # too many colons
]
for i in test_data:
try:
parse_and_validate_server_name(i)
self.fail(
"Expected parse_and_validate_server_name('%s') to throw" % (i,)
)
except ValueError:
pass
|
GHSA-3hfw-x7gx-437c
|
tests/rest/media/v1/test_filepath.py
|
@@ -11,6 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import inspect
+from typing import Iterable
+
from synapse.rest.media.v1.filepath import MediaFilePaths
from tests import unittest
@@ -236,3 +239,250 @@ def test_url_cache_thumbnail_dirs_to_delete_legacy(self):
"/media_store/url_cache_thumbnails/Ge",
],
)
+
+ def test_server_name_validation(self):
+ """Test validation of server names"""
+ self._test_path_validation(
+ [
+ "remote_media_filepath_rel",
+ "remote_media_filepath",
+ "remote_media_thumbnail_rel",
+ "remote_media_thumbnail",
+ "remote_media_thumbnail_rel_legacy",
+ "remote_media_thumbnail_dir",
+ ],
+ parameter="server_name",
+ valid_values=[
+ "matrix.org",
+ "matrix.org:8448",
+ "matrix-federation.matrix.org",
+ "matrix-federation.matrix.org:8448",
+ "10.1.12.123",
+ "10.1.12.123:8448",
+ "[fd00:abcd::ffff]",
+ "[fd00:abcd::ffff]:8448",
+ ],
+ invalid_values=[
+ "/matrix.org",
+ "matrix.org/..",
+ "matrix.org\x00",
+ "",
+ ".",
+ "..",
+ "/",
+ ],
+ )
+
+ def test_file_id_validation(self):
+ """Test validation of local, remote and legacy URL cache file / media IDs"""
+ # File / media IDs get split into three parts to form paths, consisting of the
+ # first two characters, next two characters and rest of the ID.
+ valid_file_ids = [
+ "GerZNDnDZVjsOtardLuwfIBg",
+ # Unexpected, but produces an acceptable path:
+ "GerZN", # "N" becomes the last directory
+ ]
+ invalid_file_ids = [
+ "/erZNDnDZVjsOtardLuwfIBg",
+ "Ge/ZNDnDZVjsOtardLuwfIBg",
+ "GerZ/DnDZVjsOtardLuwfIBg",
+ "GerZ/..",
+ "G\x00rZNDnDZVjsOtardLuwfIBg",
+ "Ger\x00NDnDZVjsOtardLuwfIBg",
+ "GerZNDnDZVjsOtardLuwfIBg\x00",
+ "",
+ "Ge",
+ "GerZ",
+ "GerZ.",
+ "..rZNDnDZVjsOtardLuwfIBg",
+ "Ge..NDnDZVjsOtardLuwfIBg",
+ "GerZ..",
+ "GerZ/",
+ ]
+
+ self._test_path_validation(
+ [
+ "local_media_filepath_rel",
+ "local_media_filepath",
+ "local_media_thumbnail_rel",
+ "local_media_thumbnail",
+ "local_media_thumbnail_dir",
+ # Legacy URL cache media IDs
+ "url_cache_filepath_rel",
+ "url_cache_filepath",
+ # `url_cache_filepath_dirs_to_delete` is tested below.
+ "url_cache_thumbnail_rel",
+ "url_cache_thumbnail",
+ "url_cache_thumbnail_directory_rel",
+ "url_cache_thumbnail_directory",
+ "url_cache_thumbnail_dirs_to_delete",
+ ],
+ parameter="media_id",
+ valid_values=valid_file_ids,
+ invalid_values=invalid_file_ids,
+ )
+
+ # `url_cache_filepath_dirs_to_delete` ignores what would be the last path
+ # component, so only the first 4 characters matter.
+ self._test_path_validation(
+ [
+ "url_cache_filepath_dirs_to_delete",
+ ],
+ parameter="media_id",
+ valid_values=valid_file_ids,
+ invalid_values=[
+ "/erZNDnDZVjsOtardLuwfIBg",
+ "Ge/ZNDnDZVjsOtardLuwfIBg",
+ "G\x00rZNDnDZVjsOtardLuwfIBg",
+ "Ger\x00NDnDZVjsOtardLuwfIBg",
+ "",
+ "Ge",
+ "..rZNDnDZVjsOtardLuwfIBg",
+ "Ge..NDnDZVjsOtardLuwfIBg",
+ ],
+ )
+
+ self._test_path_validation(
+ [
+ "remote_media_filepath_rel",
+ "remote_media_filepath",
+ "remote_media_thumbnail_rel",
+ "remote_media_thumbnail",
+ "remote_media_thumbnail_rel_legacy",
+ "remote_media_thumbnail_dir",
+ ],
+ parameter="file_id",
+ valid_values=valid_file_ids,
+ invalid_values=invalid_file_ids,
+ )
+
+ def test_url_cache_media_id_validation(self):
+ """Test validation of URL cache media IDs"""
+ self._test_path_validation(
+ [
+ "url_cache_filepath_rel",
+ "url_cache_filepath",
+ # `url_cache_filepath_dirs_to_delete` only cares about the date prefix
+ "url_cache_thumbnail_rel",
+ "url_cache_thumbnail",
+ "url_cache_thumbnail_directory_rel",
+ "url_cache_thumbnail_directory",
+ "url_cache_thumbnail_dirs_to_delete",
+ ],
+ parameter="media_id",
+ valid_values=[
+ "2020-01-02_GerZNDnDZVjsOtar",
+ "2020-01-02_G", # Unexpected, but produces an acceptable path
+ ],
+ invalid_values=[
+ "2020-01-02",
+ "2020-01-02-",
+ "2020-01-02-.",
+ "2020-01-02-..",
+ "2020-01-02-/",
+ "2020-01-02-/GerZNDnDZVjsOtar",
+ "2020-01-02-GerZNDnDZVjsOtar/..",
+ "2020-01-02-GerZNDnDZVjsOtar\x00",
+ ],
+ )
+
+ def test_content_type_validation(self):
+ """Test validation of thumbnail content types"""
+ self._test_path_validation(
+ [
+ "local_media_thumbnail_rel",
+ "local_media_thumbnail",
+ "remote_media_thumbnail_rel",
+ "remote_media_thumbnail",
+ "remote_media_thumbnail_rel_legacy",
+ "url_cache_thumbnail_rel",
+ "url_cache_thumbnail",
+ ],
+ parameter="content_type",
+ valid_values=[
+ "image/jpeg",
+ ],
+ invalid_values=[
+ "", # ValueError: not enough values to unpack
+ "image/jpeg/abc", # ValueError: too many values to unpack
+ "image/jpeg\x00",
+ ],
+ )
+
+ def test_thumbnail_method_validation(self):
+ """Test validation of thumbnail methods"""
+ self._test_path_validation(
+ [
+ "local_media_thumbnail_rel",
+ "local_media_thumbnail",
+ "remote_media_thumbnail_rel",
+ "remote_media_thumbnail",
+ "url_cache_thumbnail_rel",
+ "url_cache_thumbnail",
+ ],
+ parameter="method",
+ valid_values=[
+ "crop",
+ "scale",
+ ],
+ invalid_values=[
+ "/scale",
+ "scale/..",
+ "scale\x00",
+ "/",
+ ],
+ )
+
+ def _test_path_validation(
+ self,
+ methods: Iterable[str],
+ parameter: str,
+ valid_values: Iterable[str],
+ invalid_values: Iterable[str],
+ ):
+ """Test that the specified methods validate the named parameter as expected
+
+ Args:
+ methods: The names of `MediaFilePaths` methods to test
+ parameter: The name of the parameter to test
+ valid_values: A list of parameter values that are expected to be accepted
+ invalid_values: A list of parameter values that are expected to be rejected
+
+ Raises:
+ AssertionError: If a value was accepted when it should have failed
+ validation.
+ ValueError: If a value failed validation when it should have been accepted.
+ """
+ for method in methods:
+ get_path = getattr(self.filepaths, method)
+
+ parameters = inspect.signature(get_path).parameters
+ kwargs = {
+ "server_name": "matrix.org",
+ "media_id": "GerZNDnDZVjsOtardLuwfIBg",
+ "file_id": "GerZNDnDZVjsOtardLuwfIBg",
+ "width": 800,
+ "height": 600,
+ "content_type": "image/jpeg",
+ "method": "scale",
+ }
+
+ if get_path.__name__.startswith("url_"):
+ kwargs["media_id"] = "2020-01-02_GerZNDnDZVjsOtar"
+
+ kwargs = {k: v for k, v in kwargs.items() if k in parameters}
+ kwargs.pop(parameter)
+
+ for value in valid_values:
+ kwargs[parameter] = value
+ get_path(**kwargs)
+ # No exception should be raised
+
+ for value in invalid_values:
+ with self.assertRaises(ValueError):
+ kwargs[parameter] = value
+ path_or_list = get_path(**kwargs)
+ self.fail(
+ f"{value!r} unexpectedly passed validation: "
+ f"{method} returned {path_or_list!r}"
+ )
|
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.rest.media.v1.filepath import MediaFilePaths
from tests import unittest
class MediaFilePathsTestCase(unittest.TestCase):
def setUp(self):
super().setUp()
self.filepaths = MediaFilePaths("/media_store")
def test_local_media_filepath(self):
"""Test local media paths"""
self.assertEqual(
self.filepaths.local_media_filepath_rel("GerZNDnDZVjsOtardLuwfIBg"),
"local_content/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
self.assertEqual(
self.filepaths.local_media_filepath("GerZNDnDZVjsOtardLuwfIBg"),
"/media_store/local_content/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
def test_local_media_thumbnail(self):
"""Test local media thumbnail paths"""
self.assertEqual(
self.filepaths.local_media_thumbnail_rel(
"GerZNDnDZVjsOtardLuwfIBg", 800, 600, "image/jpeg", "scale"
),
"local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
)
self.assertEqual(
self.filepaths.local_media_thumbnail(
"GerZNDnDZVjsOtardLuwfIBg", 800, 600, "image/jpeg", "scale"
),
"/media_store/local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
)
def test_local_media_thumbnail_dir(self):
"""Test local media thumbnail directory paths"""
self.assertEqual(
self.filepaths.local_media_thumbnail_dir("GerZNDnDZVjsOtardLuwfIBg"),
"/media_store/local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
def test_remote_media_filepath(self):
"""Test remote media paths"""
self.assertEqual(
self.filepaths.remote_media_filepath_rel(
"example.com", "GerZNDnDZVjsOtardLuwfIBg"
),
"remote_content/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
self.assertEqual(
self.filepaths.remote_media_filepath(
"example.com", "GerZNDnDZVjsOtardLuwfIBg"
),
"/media_store/remote_content/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
def test_remote_media_thumbnail(self):
"""Test remote media thumbnail paths"""
self.assertEqual(
self.filepaths.remote_media_thumbnail_rel(
"example.com",
"GerZNDnDZVjsOtardLuwfIBg",
800,
600,
"image/jpeg",
"scale",
),
"remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
)
self.assertEqual(
self.filepaths.remote_media_thumbnail(
"example.com",
"GerZNDnDZVjsOtardLuwfIBg",
800,
600,
"image/jpeg",
"scale",
),
"/media_store/remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
)
def test_remote_media_thumbnail_legacy(self):
"""Test old-style remote media thumbnail paths"""
self.assertEqual(
self.filepaths.remote_media_thumbnail_rel_legacy(
"example.com", "GerZNDnDZVjsOtardLuwfIBg", 800, 600, "image/jpeg"
),
"remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg",
)
def test_remote_media_thumbnail_dir(self):
"""Test remote media thumbnail directory paths"""
self.assertEqual(
self.filepaths.remote_media_thumbnail_dir(
"example.com", "GerZNDnDZVjsOtardLuwfIBg"
),
"/media_store/remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
def test_url_cache_filepath(self):
"""Test URL cache paths"""
self.assertEqual(
self.filepaths.url_cache_filepath_rel("2020-01-02_GerZNDnDZVjsOtar"),
"url_cache/2020-01-02/GerZNDnDZVjsOtar",
)
self.assertEqual(
self.filepaths.url_cache_filepath("2020-01-02_GerZNDnDZVjsOtar"),
"/media_store/url_cache/2020-01-02/GerZNDnDZVjsOtar",
)
def test_url_cache_filepath_legacy(self):
"""Test old-style URL cache paths"""
self.assertEqual(
self.filepaths.url_cache_filepath_rel("GerZNDnDZVjsOtardLuwfIBg"),
"url_cache/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
self.assertEqual(
self.filepaths.url_cache_filepath("GerZNDnDZVjsOtardLuwfIBg"),
"/media_store/url_cache/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
def test_url_cache_filepath_dirs_to_delete(self):
"""Test URL cache cleanup paths"""
self.assertEqual(
self.filepaths.url_cache_filepath_dirs_to_delete(
"2020-01-02_GerZNDnDZVjsOtar"
),
["/media_store/url_cache/2020-01-02"],
)
def test_url_cache_filepath_dirs_to_delete_legacy(self):
"""Test old-style URL cache cleanup paths"""
self.assertEqual(
self.filepaths.url_cache_filepath_dirs_to_delete(
"GerZNDnDZVjsOtardLuwfIBg"
),
[
"/media_store/url_cache/Ge/rZ",
"/media_store/url_cache/Ge",
],
)
def test_url_cache_thumbnail(self):
"""Test URL cache thumbnail paths"""
self.assertEqual(
self.filepaths.url_cache_thumbnail_rel(
"2020-01-02_GerZNDnDZVjsOtar", 800, 600, "image/jpeg", "scale"
),
"url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale",
)
self.assertEqual(
self.filepaths.url_cache_thumbnail(
"2020-01-02_GerZNDnDZVjsOtar", 800, 600, "image/jpeg", "scale"
),
"/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale",
)
def test_url_cache_thumbnail_legacy(self):
"""Test old-style URL cache thumbnail paths"""
self.assertEqual(
self.filepaths.url_cache_thumbnail_rel(
"GerZNDnDZVjsOtardLuwfIBg", 800, 600, "image/jpeg", "scale"
),
"url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
)
self.assertEqual(
self.filepaths.url_cache_thumbnail(
"GerZNDnDZVjsOtardLuwfIBg", 800, 600, "image/jpeg", "scale"
),
"/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
)
def test_url_cache_thumbnail_directory(self):
"""Test URL cache thumbnail directory paths"""
self.assertEqual(
self.filepaths.url_cache_thumbnail_directory_rel(
"2020-01-02_GerZNDnDZVjsOtar"
),
"url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar",
)
self.assertEqual(
self.filepaths.url_cache_thumbnail_directory("2020-01-02_GerZNDnDZVjsOtar"),
"/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar",
)
def test_url_cache_thumbnail_directory_legacy(self):
"""Test old-style URL cache thumbnail directory paths"""
self.assertEqual(
self.filepaths.url_cache_thumbnail_directory_rel(
"GerZNDnDZVjsOtardLuwfIBg"
),
"url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
self.assertEqual(
self.filepaths.url_cache_thumbnail_directory("GerZNDnDZVjsOtardLuwfIBg"),
"/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg",
)
def test_url_cache_thumbnail_dirs_to_delete(self):
"""Test URL cache thumbnail cleanup paths"""
self.assertEqual(
self.filepaths.url_cache_thumbnail_dirs_to_delete(
"2020-01-02_GerZNDnDZVjsOtar"
),
[
"/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar",
"/media_store/url_cache_thumbnails/2020-01-02",
],
)
def test_url_cache_thumbnail_dirs_to_delete_legacy(self):
"""Test old-style URL cache thumbnail cleanup paths"""
self.assertEqual(
self.filepaths.url_cache_thumbnail_dirs_to_delete(
"GerZNDnDZVjsOtardLuwfIBg"
),
[
"/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg",
"/media_store/url_cache_thumbnails/Ge/rZ",
"/media_store/url_cache_thumbnails/Ge",
],
)
|
GHSA-3hfw-x7gx-437c
|
pyshop/helpers/download.py
|
@@ -33,7 +33,12 @@ def __call__(self, value, system):
if not os.path.exists(dir_):
os.makedirs(dir_, 0750)
- resp = requests.get(value['url'])
+ if value['url'].startswith('https://pypi.python.org'):
+ verify = os.path.join(os.path.dirname(__file__), 'pypi.pem')
+ else:
+ verify = value['url'].startswith('https:')
+
+ resp = requests.get(value['url'], verify=verify)
with open(f, 'wb') as rf:
rf.write(resp.content)
return resp.content
|
import os
import os.path
import mimetypes
import requests
from zope.interface import implements
from pyramid.interfaces import ITemplateRenderer
class ReleaseFileRenderer(object):
implements(ITemplateRenderer)
def __init__(self, repository_root):
self.repository_root = repository_root
def __call__(self, value, system):
if 'request' in system:
request = system['request']
mime, encoding = mimetypes.guess_type(value['filename'])
request.response_content_type = mime
if encoding:
request.response_encoding = encoding
f = os.path.join(self.repository_root,
value['filename'][0].lower(),
value['filename'])
if not os.path.exists(f):
dir_ = os.path.join(self.repository_root,
value['filename'][0].lower())
if not os.path.exists(dir_):
os.makedirs(dir_, 0750)
resp = requests.get(value['url'])
with open(f, 'wb') as rf:
rf.write(resp.content)
return resp.content
else:
data = ''
with open(f, 'rb') as rf:
data = ''
while True:
content = rf.read(2<<16)
if not content:
break
data += content
return data
def renderer_factory(info):
return ReleaseFileRenderer(info.settings['pyshop.repository'])
|
PYSEC-2013-10
|
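In the download.py fix above, the `verify` argument given to `requests.get` is either a boolean or a filesystem path: a path pins a specific CA bundle (here a `pypi.pem` shipped next to the module), `True` uses the default CA bundle, and `False` or a plain-http URL skips certificate checking. A small standalone sketch of the same selection logic; the `fetch` helper and `pem_dir` parameter are illustrative, not part of pyshop:

import os

import requests  # third-party dependency, as in the patched module


def fetch(url, pem_dir):
    # Pin a bundled CA certificate for PyPI; otherwise verify whenever the URL is https.
    if url.startswith('https://pypi.python.org'):
        verify = os.path.join(pem_dir, 'pypi.pem')
    else:
        verify = url.startswith('https:')
    resp = requests.get(url, verify=verify)
    return resp.content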
pyshop/views/repository.py
|
@@ -6,8 +6,12 @@ def get_release_file(root, request):
session = DBSession()
f = ReleaseFile.by_id(session, int(request.matchdict['file_id']))
+ url = f.url
+ if url.startswith('http://pypi.python.org'):
+ url = 'https' + url[4:]
+
rv = {'id': f.id,
- 'url': f.url,
+ 'url': url,
'filename': f.filename,
}
f.downloads += 1
|
# -*- coding: utf-8 -*-
from pyshop.models import DBSession, ReleaseFile
def get_release_file(root, request):
session = DBSession()
f = ReleaseFile.by_id(session, int(request.matchdict['file_id']))
rv = {'id': f.id,
'url': f.url,
'filename': f.filename,
}
f.downloads += 1
f.release.downloads += 1
f.release.package.downloads += 1
session.add(f.release.package)
session.add(f.release)
session.add(f)
return rv
|
PYSEC-2013-10
|
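The repository.py change that closes out PYSEC-2013-10 upgrades stored `http://pypi.python.org` release URLs to `https` before they are handed to the downloader above, so the pinned certificate check can actually take effect. A standalone sketch of that rewrite; the helper name is illustrative, the patch performs the slice inline:

def upgrade_pypi_scheme(url):
    # 'http://pypi.python.org/...' -> 'https://pypi.python.org/...'
    if url.startswith('http://pypi.python.org'):
        return 'https' + url[4:]
    return url


assert upgrade_pypi_scheme('http://pypi.python.org/packages/foo.tar.gz') == \
    'https://pypi.python.org/packages/foo.tar.gz'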