Dataset Viewer
filename
stringlengths 6
103
| patch
stringlengths 47
76k
| parent_content
stringlengths 17
1.6M
| id
stringlengths 12
19
|
---|---|---|---|
aiosmtpd/smtp.py
|
@@ -87,7 +87,7 @@ class _DataState(enum.Enum):
EMPTY_BARR = bytearray()
EMPTYBYTES = b''
MISSING = _Missing.MISSING
-NEWLINE = '\n'
+NEWLINE = '\r\n'
VALID_AUTHMECH = re.compile(r"[A-Z0-9_-]+\Z")
# https://tools.ietf.org/html/rfc3207.html#page-3
@@ -1427,9 +1427,10 @@ async def smtp_DATA(self, arg: str) -> None:
# Since eof_received cancels this coroutine,
# readuntil() can never raise asyncio.IncompleteReadError.
try:
- line: bytes = await self._reader.readuntil()
+ # https://datatracker.ietf.org/doc/html/rfc5321#section-2.3.8
+ line: bytes = await self._reader.readuntil(b'\r\n')
log.debug('DATA readline: %s', line)
- assert line.endswith(b'\n')
+ assert line.endswith(b'\r\n')
except asyncio.CancelledError:
# The connection got reset during the DATA command.
log.info('Connection lost during DATA')
@@ -1446,7 +1447,7 @@ async def smtp_DATA(self, arg: str) -> None:
data *= 0
# Drain the stream anyways
line = await self._reader.read(e.consumed)
- assert not line.endswith(b'\n')
+ assert not line.endswith(b'\r\n')
# A lone dot in a line signals the end of DATA.
if not line_fragments and line == b'.\r\n':
break
@@ -1458,7 +1459,7 @@ async def smtp_DATA(self, arg: str) -> None:
# Discard data immediately to prevent memory pressure
data *= 0
line_fragments.append(line)
- if line.endswith(b'\n'):
+ if line.endswith(b'\r\n'):
# Record data only if state is "NOMINAL"
if state == _DataState.NOMINAL:
line = EMPTY_BARR.join(line_fragments)
|
# Copyright 2014-2021 The aiosmtpd Developers
# SPDX-License-Identifier: Apache-2.0
import asyncio
import asyncio.sslproto as sslproto
import binascii
import collections
import enum
import inspect
import logging
import re
import socket
import ssl
from base64 import b64decode, b64encode
from email._header_value_parser import get_addr_spec, get_angle_addr
from email.errors import HeaderParseError
from typing import (
Any,
AnyStr,
Awaitable,
Callable,
Dict,
Iterable,
List,
MutableMapping,
NamedTuple,
Optional,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
)
from warnings import warn
import attr
from public import public
from aiosmtpd import __version__, _get_or_new_eventloop
from aiosmtpd.proxy_protocol import ProxyData, get_proxy
# region #### Custom Data Types #######################################################
class _Missing(enum.Enum):
    """Sentinel type; its single member MISSING means "no value provided"."""
    # object() gives a unique value so MISSING can never collide with real data.
    MISSING = object()
class _AuthMechAttr(NamedTuple):
    """An AUTH mechanism implementation together with its origin."""
    method: "AuthMechanismType"
    is_builtin: bool  # True if defined on SMTP itself rather than the handler
class _DataState(enum.Enum):
    """States while collecting the DATA payload (presumably line/size limits)."""
    NOMINAL = enum.auto()
    TOO_LONG = enum.auto()  # NOTE(review): likely "line too long" — confirm in smtp_DATA
    TOO_MUCH = enum.auto()  # NOTE(review): likely "payload too big" — confirm in smtp_DATA
# Legacy auth callback signature: (mechanism, login, password) -> success
AuthCallbackType = Callable[[str, bytes, bytes], bool]
# New-style authenticator: (server, session, envelope, mechanism, auth_data) -> AuthResult
AuthenticatorType = Callable[["SMTP", "Session", "Envelope", str, Any], "AuthResult"]
# An AUTH mechanism coroutine: (server, args) -> awaitable result
AuthMechanismType = Callable[["SMTP", List[str]], Awaitable[Any]]
# Tri-state value: None / the MISSING sentinel / actual bytes
_TriStateType = Union[None, _Missing, bytes]
RT = TypeVar("RT")  # "ReturnType"
DecoratorType = Callable[[Callable[..., RT]], Callable[..., RT]]
# endregion
# region #### Constant & Constant-likes ###############################################
__all__ = [
    "AuthCallbackType",
    "AuthMechanismType",
    "MISSING",
    "__version__",
]  # Will be added to by @public
__ident__ = 'Python SMTP {}'.format(__version__)
log = logging.getLogger('mail.log')
BOGUS_LIMIT = 5  # max unrecognized commands before disconnecting
CALL_LIMIT_DEFAULT = 20  # default per-command invocation budget
DATA_SIZE_DEFAULT = 2**25  # Where does this number come from, I wonder...
EMPTY_BARR = bytearray()
EMPTYBYTES = b''
MISSING = _Missing.MISSING
# SMTP is line-oriented and lines are terminated by CRLF, not a bare LF.
# Using '\n' here caused bare-LF sequences to be treated as line endings.
# https://datatracker.ietf.org/doc/html/rfc5321#section-2.3.8
NEWLINE = '\r\n'
VALID_AUTHMECH = re.compile(r"[A-Z0-9_-]+\Z")
# https://tools.ietf.org/html/rfc3207.html#page-3
ALLOWED_BEFORE_STARTTLS = {"NOOP", "EHLO", "STARTTLS", "QUIT"}
# Auth hiding regexes
CLIENT_AUTH_B = re.compile(
    # Matches "AUTH" <mechanism> <whitespace_but_not_\r_nor_\n>
    br"(?P<authm>\s*AUTH\s+\S+[^\S\r\n]+)"
    # Param to AUTH <mechanism>. We only need to sanitize if param is given, which
    # for some mechanisms contain sensitive info. If no param is given, then we
    # can skip (match fails)
    br"(\S+)"
    # Optional bCRLF at end. Why optional? Because we also want to sanitize the
    # stripped line. If no bCRLF, then this group will be b""
    br"(?P<crlf>(?:\r\n)?)", re.IGNORECASE
)
"""Regex that matches 'AUTH <mech> <param>' commend"""
# endregion
@attr.s
class AuthResult:
    """
    Contains the result of authentication, to be returned to the smtp_AUTH method.
    All initialization arguments _must_ be keyworded!
    """
    success: bool = attr.ib(kw_only=True)
    """Indicates authentication is successful or not"""
    handled: bool = attr.ib(kw_only=True, default=True)
    """
    True means everything (including sending of status code) has been handled by the
    AUTH handler and smtp_AUTH should not do anything else.
    Applicable only if success == False.
    """
    message: Optional[str] = attr.ib(kw_only=True, default=None)
    """
    Optional message for additional handling by smtp_AUTH.
    Applicable only if handled == False.
    """
    # repr deliberately elides the value to keep credentials out of logs.
    auth_data: Optional[Any] = attr.ib(kw_only=True, default=None, repr=lambda x: "...")
    """
    Optional free-form authentication data. For the built-in mechanisms, it is usually
    an instance of LoginPassword. Other implementations are free to use any data
    structure here.
    """
@public
class LoginPassword(NamedTuple):
    """Credential pair handed to authenticators; never reveals the password."""
    login: bytes
    password: bytes

    def __str__(self) -> str:
        # Redact the password so stringified credentials are log-safe.
        return "LoginPassword(login='{}', password=...)".format(self.login.decode())

    def __repr__(self) -> str:
        return self.__str__()
@public
class Session:
    """Per-connection state: peer identity, TLS info, and AUTH status."""

    def __init__(self, loop: asyncio.AbstractEventLoop):
        self.peer: Optional[str] = None  # transport's peername extra info
        self.ssl: Optional[dict[str, Any]] = None  # set after STARTTLS succeeds
        self.host_name: Optional[str] = None  # hostname given in HELO/EHLO
        self.extended_smtp = False  # True once EHLO (rather than HELO) is seen
        self.loop = loop
        self.proxy_data: Optional[ProxyData] = None
        """Data from PROXY Protocol handshake"""
        self._login_data = None
        self.auth_data = None
        """
        New system *optional* authentication data;
        can contain anything returned by the authenticator callback.
        Can even be None; check `authenticated` attribute to determine
        if AUTH successful or not.
        """
        # None = AUTH never attempted; True/False = outcome of last attempt
        self.authenticated: Optional[bool] = None

    @property
    def login_data(self) -> Any:
        """Legacy login_data, usually containing the username"""
        log.warning(
            "Session.login_data is deprecated and will be removed in version 2.0"
        )
        return self._login_data

    @login_data.setter
    def login_data(self, value: Any) -> None:
        log.warning(
            "Session.login_data is deprecated and will be removed in version 2.0"
        )
        self._login_data = value
@public
class Envelope:
    """Per-transaction state built up across MAIL FROM / RCPT TO / DATA."""

    def __init__(self) -> None:
        self.mail_from: Optional[str] = None  # sender from MAIL FROM
        self.mail_options: List[str] = []  # ESMTP params given on MAIL FROM
        self.smtp_utf8 = False  # True if SMTPUTF8 was requested for this mail
        self.content: Union[None, bytes, str] = None  # DATA payload (str if decode_data)
        self.original_content: Optional[bytes] = None  # raw undecoded DATA payload
        self.rcpt_tos: List[str] = []  # recipients from RCPT TO
        self.rcpt_options: List[str] = []  # ESMTP params given on RCPT TO
# This is here to enable debugging output when the -E option is given to the
# unit test suite. In that case, this function is mocked to set the debug
# level on the loop (as if PYTHONASYNCIODEBUG=1 were set).
def make_loop() -> asyncio.AbstractEventLoop:
    """Return the event loop to use (indirection kept for test mocking)."""
    return _get_or_new_eventloop()
@public
def syntax(
    text: str, extended: Optional[str] = None, when: Optional[str] = None
) -> DecoratorType:
    """
    A @decorator that provides helptext for (E)SMTP HELP.
    Applies for smtp_* methods only!
    :param text: Help text for (E)SMTP HELP
    :param extended: Additional text for ESMTP HELP (appended to text)
    :param when: The name of the attribute of SMTP class to check; if the value
       of the attribute is false-y then HELP will not be available for the command
    """
    def decorator(f: Callable[..., RT]) -> Callable[..., RT]:
        # Stash all help metadata directly on the decorated function object.
        for attr_name, attr_value in (
            ("__smtp_syntax__", text),
            ("__smtp_syntax_extended__", extended),
            ("__smtp_syntax_when__", when),
        ):
            setattr(f, attr_name, attr_value)
        return f
    return decorator
@public
def auth_mechanism(actual_name: str) -> DecoratorType:
    """
    A @decorator to explicitly specifies the name of the AUTH mechanism implemented by
    the function/method this decorates
    :param actual_name: Name of AUTH mechanism. Must consists of [A-Z0-9_-] only.
        Will be converted to uppercase
    """
    # Validate eagerly so a bad name fails at decoration-definition time.
    actual_name = actual_name.upper()
    if VALID_AUTHMECH.match(actual_name) is None:
        raise ValueError(f"Invalid AUTH mechanism name: {actual_name}")

    def decorator(f: Callable[..., RT]) -> Callable[..., RT]:
        f.__auth_mechanism_name__ = actual_name
        return f

    return decorator
def login_always_fail(mechanism: str, login: bytes, password: bytes) -> bool:
    """Default AUTH callback: unconditionally reject every credential."""
    return False
def is_int(o: Any) -> bool:
    """Return True when *o* is an int (note: bool is an int subclass)."""
    result = isinstance(o, int)
    return result
@public
class TLSSetupException(Exception):
    """Raised when the TLS handshake fails after STARTTLS."""
    pass
@public
def sanitize(text: bytes) -> bytes:
    """Mask the credential parameter of an 'AUTH <mech> <param>' line."""
    match = CLIENT_AUTH_B.match(text)
    if match is None:
        # Not an AUTH-with-param line; nothing sensitive to hide.
        return text
    return match.group("authm") + b"********" + match.group("crlf")
@public
def sanitized_log(func: Callable[..., None], msg: AnyStr, *args, **kwargs) -> None:
    """
    Sanitize args before passing to a logging function.
    """
    # Only bytes args can carry raw AUTH credentials; leave others untouched.
    cleaned = tuple(
        sanitize(arg) if isinstance(arg, bytes) else arg for arg in args
    )
    func(msg, *cleaned, **kwargs)
@public
class SMTP(asyncio.StreamReaderProtocol):
    """
    `Documentation can be found here
    <https://aiosmtpd.readthedocs.io/en/latest/smtp.html>`_
    """
    # Default maximum command line length in octets.
    command_size_limit = 512
    # Per-command limits; smtp_EHLO raises some entries during negotiation.
    command_size_limits: Dict[str, int] = collections.defaultdict(
        lambda: SMTP.command_size_limit)
    line_length_limit = 1001
    """Maximum line length according to RFC 5321 s 4.5.3.1.6"""
    # The number comes from this calculation:
    # (RFC 5322 s 2.1.1 + RFC 6532 s 3.4) 998 octets + CRLF = 1000 octets
    # (RFC 5321 s 4.5.3.1.6) 1000 octets + "transparent dot" = 1001 octets
    local_part_limit: int = 0
    """
    Maximum local part length. (RFC 5321 § 4.5.3.1.1 specifies 64, but lenient)
    If 0 or Falsey, local part length is unlimited.
    """
    # Prompt strings used by the built-in AUTH LOGIN mechanism.
    AuthLoginUsernameChallenge = "User Name\x00"
    AuthLoginPasswordChallenge = "Password\x00"
    def __init__(
        self,
        handler: Any,
        *,
        data_size_limit: Optional[int] = DATA_SIZE_DEFAULT,
        enable_SMTPUTF8: bool = False,
        decode_data: bool = False,
        hostname: Optional[str] = None,
        ident: Optional[str] = None,
        tls_context: Optional[ssl.SSLContext] = None,
        require_starttls: bool = False,
        timeout: float = 300,
        auth_required: bool = False,
        auth_require_tls: bool = True,
        auth_exclude_mechanism: Optional[Iterable[str]] = None,
        auth_callback: Optional[AuthCallbackType] = None,
        command_call_limit: Union[int, Dict[str, int], None] = None,
        authenticator: Optional[AuthenticatorType] = None,
        proxy_protocol_timeout: Optional[Union[int, float]] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None
    ):
        """
        Build an SMTP protocol instance around *handler*.

        :param handler: Object whose handle_*/auth_* methods implement policy.
        :param data_size_limit: Max DATA size in bytes; None means unlimited.
        :param enable_SMTPUTF8: Advertise/accept the SMTPUTF8 extension.
        :param decode_data: Decode the DATA payload to str if True.
        :param hostname: Greeting hostname; defaults to socket.getfqdn().
        :param ident: Server ident string; defaults to module __ident__.
        :param tls_context: SSLContext enabling STARTTLS support.
        :param require_starttls: Refuse most commands before STARTTLS.
        :param timeout: Idle seconds before the connection is dropped.
        :param auth_required: Require successful AUTH for the session.
        :param auth_require_tls: Only permit AUTH over TLS (recommended).
        :param auth_exclude_mechanism: AUTH mechanism names to disable.
        :param auth_callback: Legacy (mechanism, login, password) callback.
        :param command_call_limit: Per-command invocation budget (int or map).
        :param authenticator: New-style authenticator returning AuthResult.
        :param proxy_protocol_timeout: Seconds to await a PROXY handshake.
        :param loop: Event loop; a default one is obtained if None.
        """
        self.__ident__ = ident or __ident__
        self.loop = loop if loop else make_loop()
        super().__init__(
            asyncio.StreamReader(loop=self.loop, limit=self.line_length_limit),
            client_connected_cb=self._cb_client_connected,
            loop=self.loop)
        self.event_handler = handler
        assert data_size_limit is None or isinstance(data_size_limit, int)
        self.data_size_limit = data_size_limit
        self.enable_SMTPUTF8 = enable_SMTPUTF8
        self._decode_data = decode_data
        # Class-level defaultdict is shared; clear leftovers from prior instances.
        self.command_size_limits.clear()
        if hostname:
            self.hostname = hostname
        else:
            self.hostname = socket.getfqdn()
        self.tls_context = tls_context
        if tls_context:
            # Warn about context settings known to break SMTP clients.
            if (tls_context.verify_mode
                    not in {ssl.CERT_NONE, ssl.CERT_OPTIONAL}):  # noqa: DUO122
                log.warning("tls_context.verify_mode not in {CERT_NONE, "
                            "CERT_OPTIONAL}; this might cause client "
                            "connection problems")
            elif tls_context.check_hostname:
                log.warning("tls_context.check_hostname == True; "
                            "this might cause client connection problems")
        self.require_starttls = tls_context and require_starttls
        self._timeout_duration = timeout
        self._timeout_handle: Optional[asyncio.TimerHandle] = None
        self._tls_handshake_okay = True
        self._tls_protocol: Optional[sslproto.SSLProtocol] = None
        self._original_transport: Optional[asyncio.BaseTransport] = None
        self.session: Optional[Session] = None
        self.envelope: Optional[Envelope] = None
        self.transport: Optional[asyncio.BaseTransport] = None
        self._handler_coroutine: Optional[asyncio.Task[None]] = None
        if not auth_require_tls and auth_required:
            warn("Requiring AUTH while not requiring TLS "
                 "can lead to security vulnerabilities!")
            log.warning("auth_required == True but auth_require_tls == False")
        self._auth_require_tls = auth_require_tls
        if proxy_protocol_timeout is not None:
            if proxy_protocol_timeout <= 0:
                raise ValueError("proxy_protocol_timeout must be > 0")
            elif proxy_protocol_timeout < 3.0:
                log.warning("proxy_protocol_timeout < 3.0")
        self._proxy_timeout = proxy_protocol_timeout
        # New-style authenticator wins over the legacy callback.
        self._authenticator: Optional[AuthenticatorType]
        self._auth_callback: Optional[AuthCallbackType]
        if authenticator is not None:
            self._authenticator = authenticator
            self._auth_callback = None
        else:
            self._auth_callback = auth_callback or login_always_fail
            self._authenticator = None
        self._auth_required = auth_required
        # Get hooks & methods to significantly speedup getattr's
        self._auth_methods: Dict[str, _AuthMechAttr] = {
            getattr(
                mfunc, "__auth_mechanism_name__",
                mname.replace("auth_", "").replace("__", "-")
            ): _AuthMechAttr(mfunc, obj is self)
            for obj in (self, handler)
            for mname, mfunc in inspect.getmembers(obj)
            if mname.startswith("auth_")
        }
        for m in (auth_exclude_mechanism or []):
            self._auth_methods.pop(m, None)
        log.info(
            "Available AUTH mechanisms: "
            + " ".join(
                m + "(builtin)" if impl.is_builtin else m
                for m, impl in sorted(self._auth_methods.items())
            )
        )
        self._handle_hooks: Dict[str, Callable] = {
            m.replace("handle_", ""): getattr(handler, m)
            for m in dir(handler)
            if m.startswith("handle_")
        }
        # When we've deprecated the 4-arg form of handle_EHLO,
        # we can -- and should -- remove this whole code block
        ehlo_hook = self._handle_hooks.get("EHLO")
        if ehlo_hook is None:
            self._ehlo_hook_ver = None
        else:
            ehlo_hook_params = inspect.signature(ehlo_hook).parameters
            if len(ehlo_hook_params) == 4:
                self._ehlo_hook_ver = "old"
                warn("Use the 5-argument handle_EHLO() hook instead of "
                     "the 4-argument handle_EHLO() hook; "
                     "support for the 4-argument handle_EHLO() hook will be "
                     "removed in version 2.0",
                     DeprecationWarning)
            elif len(ehlo_hook_params) == 5:
                self._ehlo_hook_ver = "new"
            else:
                raise RuntimeError("Unsupported EHLO Hook")
        self._smtp_methods: Dict[str, Any] = {
            m.replace("smtp_", ""): getattr(self, m)
            for m in dir(self)
            if m.startswith("smtp_")
        }
        self._call_limit_default: int
        if command_call_limit is None:
            self._enforce_call_limit = False
        else:
            self._enforce_call_limit = True
            if isinstance(command_call_limit, int):
                self._call_limit_base = {}
                self._call_limit_default = command_call_limit
            elif isinstance(command_call_limit, dict):
                if not all(map(is_int, command_call_limit.values())):
                    raise TypeError("All command_call_limit values must be int")
                self._call_limit_base = command_call_limit
                # "*" entry, if present, overrides the default budget.
                self._call_limit_default = command_call_limit.get(
                    "*", CALL_LIMIT_DEFAULT
                )
            else:
                raise TypeError("command_call_limit must be int or Dict[str, int]")
    def _create_session(self) -> Session:
        """Factory for per-connection Session; subclasses may override."""
        return Session(self.loop)
    def _create_envelope(self) -> Envelope:
        """Factory for per-transaction Envelope; subclasses may override."""
        return Envelope()
async def _call_handler_hook(self, command: str, *args) -> Any:
hook = self._handle_hooks.get(command)
if hook is None:
return MISSING
status = await hook(self, self.session, self.envelope, *args)
return status
@property
def max_command_size_limit(self) -> int:
try:
return max(self.command_size_limits.values())
except ValueError:
return self.command_size_limit
    def __del__(self):  # pragma: nocover
        # This is nocover-ed because the contents *totally* does NOT affect function-
        # ality, and in addition this comes directly from StreamReaderProtocol.__del__()
        # but with a getattr()+check addition to stop the annoying (but harmless)
        # "exception ignored" messages caused by AttributeError when self._closed is
        # missing (which seems to happen randomly).
        closed = getattr(self, "_closed", None)
        if closed is None:
            return
        if closed.done() and not closed.cancelled():
            # Retrieve the exception so the loop never logs
            # "Future exception was never retrieved".
            closed.exception()
    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        """Accept a new (or TLS-upgraded) transport and start serving it."""
        # Reset state due to rfc3207 part 4.2.
        self._set_rset_state()
        self.session = self._create_session()
        self.session.peer = transport.get_extra_info('peername')
        self._reset_timeout()
        seen_starttls = (self._original_transport is not None)
        if self.transport is not None and seen_starttls:
            # It is STARTTLS connection over normal connection.
            # Re-point the existing stream reader/writer at the TLS transport
            # instead of creating new ones.
            self._reader._transport = transport  # type: ignore[attr-defined]
            self._writer._transport = transport  # type: ignore[attr-defined]
            self.transport = transport
            # Do SSL certificate checking as rfc3207 part 4.1 says. Why is
            # _extra a protected attribute?
            assert self._tls_protocol is not None
            self.session.ssl = self._tls_protocol._extra
            hook = self._handle_hooks.get("STARTTLS")
            if hook is None:
                self._tls_handshake_okay = True
            else:
                self._tls_handshake_okay = hook(
                    self, self.session, self.envelope)
        else:
            super().connection_made(transport)
            self.transport = transport
            log.info('Peer: %r', self.session.peer)
            # Process the client's requests.
            self._handler_coroutine = self.loop.create_task(
                self._handle_client())
    def connection_lost(self, error: Optional[Exception]) -> None:
        """Clean up timers, tasks and transports when the connection drops."""
        assert self.session is not None
        log.info('%r connection lost', self.session.peer)
        assert self._timeout_handle is not None
        self._timeout_handle.cancel()
        # If STARTTLS was issued, then our transport is the SSL protocol
        # transport, and we need to close the original transport explicitly,
        # otherwise an unexpected eof_received() will be called *after* the
        # connection_lost(). At that point the stream reader will already be
        # destroyed and we'll get a traceback in super().eof_received() below.
        if self._original_transport is not None:
            self._original_transport.close()
        super().connection_lost(error)
        assert self._handler_coroutine is not None
        self._handler_coroutine.cancel()
        self.transport = None
def eof_received(self) -> Optional[bool]:
assert self.session is not None
log.info('%r EOF received', self.session.peer)
assert self._handler_coroutine is not None
self._handler_coroutine.cancel()
if self.session.ssl is not None:
# If STARTTLS was issued, return False, because True has no effect
# on an SSL transport and raises a warning. Our superclass has no
# way of knowing we switched to SSL so it might return True.
return False
return super().eof_received()
def _reset_timeout(self, duration: Optional[float] = None) -> None:
if self._timeout_handle is not None:
self._timeout_handle.cancel()
self._timeout_handle = self.loop.call_later(
duration or self._timeout_duration, self._timeout_cb
)
def _timeout_cb(self):
assert self.session is not None
log.info('%r connection timeout', self.session.peer)
# Calling close() on the transport will trigger connection_lost(),
# which gracefully closes the SSL transport if required and cleans
# up state.
assert self.transport is not None
self.transport.close()
def _cb_client_connected(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
):
# This is redundant since we subclass StreamReaderProtocol, but I like
# the shorter names.
self._reader = reader
self._writer = writer
    def _set_post_data_state(self):
        """Reset state variables to their post-DATA state."""
        # A fresh Envelope discards the just-finished transaction's data.
        self.envelope = self._create_envelope()
    def _set_rset_state(self):
        """Reset all state variables except the greeting."""
        self._set_post_data_state()
async def push(self, status: AnyStr):
if isinstance(status, str):
response = bytes(
status, 'utf-8' if self.enable_SMTPUTF8 else 'ascii')
else:
response = status
assert isinstance(response, bytes)
self._writer.write(response + b"\r\n")
assert self.session is not None
log.debug("%r << %r", self.session.peer, response)
await self._writer.drain()
async def handle_exception(self, error: Exception) -> str:
if hasattr(self.event_handler, 'handle_exception'):
status = await self.event_handler.handle_exception(error)
return status
else:
assert self.session is not None
log.exception('%r SMTP session exception', self.session.peer)
status = '500 Error: ({}) {}'.format(
error.__class__.__name__, str(error))
return status
    async def _handle_client(self) -> None:
        """Main per-connection loop: optional PROXY handshake, greeting, then
        read, parse, and dispatch SMTP command lines until disconnection."""
        assert self.session is not None
        log.info('%r handling connection', self.session.peer)
        if self._proxy_timeout is not None:
            self._reset_timeout(self._proxy_timeout)
            log.debug("%r waiting PROXY handshake", self.session.peer)
            self.session.proxy_data = await get_proxy(self._reader)
            if self.session.proxy_data:
                log.info("%r valid PROXY handshake", self.session.peer)
                status = await self._call_handler_hook("PROXY", self.session.proxy_data)
                log.debug("%r handle_PROXY returned %r", self.session.peer, status)
            else:
                log.warning("%r invalid PROXY handshake", self.session.peer)
                status = False
            # Reject the connection unless handle_PROXY explicitly approved it.
            if status is MISSING or not status:
                log.info("%r rejected by handle_PROXY", self.session.peer)
                assert self.transport is not None
                self.transport.close()
                return
            self._reset_timeout()
        await self.push('220 {} {}'.format(self.hostname, self.__ident__))
        if self._enforce_call_limit:
            call_limit: MutableMapping[str, int] = collections.defaultdict(
                lambda: self._call_limit_default,
                self._call_limit_base
            )
        else:
            # Not used, but this silences code inspection tools
            call_limit = {}
        bogus_budget = BOGUS_LIMIT
        while self.transport is not None:  # pragma: nobranch
            try:
                try:
                    line: bytes = await self._reader.readuntil()
                except asyncio.LimitOverrunError as error:
                    # Line too long. Read until end of line before sending 500.
                    await self._reader.read(error.consumed)
                    while True:
                        try:
                            await self._reader.readuntil()
                            break
                        except asyncio.LimitOverrunError as e:
                            # Line is even longer...
                            await self._reader.read(e.consumed)
                            continue
                    # Now that we have read a full line from the client,
                    # send error response and read the next command line.
                    await self.push('500 Command line too long')
                    continue
                sanitized_log(log.debug, '_handle_client readline: %r', line)
                # XXX this rstrip may not completely preserve old behavior.
                line = line.rstrip(b'\r\n')
                sanitized_log(log.info, '%r >> %r', self.session.peer, line)
                if not line:
                    await self.push('500 Error: bad syntax')
                    continue
                command_bytes, _, arg_bytes = line.partition(b" ")
                # Decode to string only the command name part, which must be
                # ASCII as per RFC. If there is an argument, it is decoded to
                # UTF-8/surrogateescape so that non-UTF-8 data can be
                # re-encoded back to the original bytes when the SMTP command
                # is handled.
                try:
                    command = command_bytes.upper().decode(encoding='ascii')
                except UnicodeDecodeError:
                    await self.push('500 Error: bad syntax')
                    continue
                if not arg_bytes:
                    arg: Optional[str] = None
                else:
                    arg_bytes = arg_bytes.strip()
                    # Remote SMTP servers can send us UTF-8 content despite
                    # whether they've declared to do so or not. Some old
                    # servers can send 8-bit data. Use surrogateescape so
                    # that the fidelity of the decoding is preserved, and the
                    # original bytes can be retrieved.
                    if self.enable_SMTPUTF8:
                        arg = str(
                            arg_bytes, encoding='utf-8', errors='surrogateescape')
                    else:
                        try:
                            arg = str(arg_bytes, encoding='ascii', errors='strict')
                        except UnicodeDecodeError:
                            # This happens if enable_SMTPUTF8 is false, meaning
                            # that the server explicitly does not want to
                            # accept non-ASCII, but the client ignores that and
                            # sends non-ASCII anyway.
                            await self.push('500 Error: strict ASCII mode')
                            # Should we await self.handle_exception()?
                            continue
                max_sz = (
                    self.command_size_limits[command]
                    if self.session.extended_smtp
                    else self.command_size_limit
                )
                if len(line) > max_sz:
                    await self.push('500 Command line too long')
                    continue
                if not self._tls_handshake_okay and command != 'QUIT':
                    await self.push(
                        '554 Command refused due to lack of security')
                    continue
                if (self.require_starttls
                        and not self._tls_protocol
                        and command not in ALLOWED_BEFORE_STARTTLS):
                    # RFC3207 part 4
                    await self.push('530 Must issue a STARTTLS command first')
                    continue
                if self._enforce_call_limit:
                    budget = call_limit[command]
                    if budget < 1:
                        log.warning(
                            "%r over limit for %s", self.session.peer, command
                        )
                        await self.push(
                            f"421 4.7.0 {command} sent too many times"
                        )
                        self.transport.close()
                        continue
                    call_limit[command] = budget - 1
                method = self._smtp_methods.get(command)
                if method is None:
                    log.warning("%r unrecognised: %s", self.session.peer, command)
                    bogus_budget -= 1
                    if bogus_budget < 1:
                        log.warning("%r too many bogus commands", self.session.peer)
                        await self.push(
                            "502 5.5.1 Too many unrecognized commands, goodbye."
                        )
                        self.transport.close()
                        continue
                    await self.push(
                        f'500 Error: command "{command}" not recognized'
                    )
                    continue
                # Received a valid command, reset the timer.
                self._reset_timeout()
                await method(arg)
            except asyncio.CancelledError:
                # The connection got reset during the DATA command.
                # XXX If handler method raises ConnectionResetError, we should
                # verify that it was actually self._reader that was reset.
                log.info('%r Connection lost during _handle_client()',
                         self.session.peer)
                self._writer.close()
                raise
            except ConnectionResetError:
                log.info('%r Connection lost during _handle_client()',
                         self.session.peer)
                self._writer.close()
                raise
            except Exception as error:
                status = None
                try:
                    status = await self.handle_exception(error)
                except Exception as inner_error:
                    try:
                        log.exception('%r Exception in handle_exception()',
                                      self.session.peer)
                        status = '500 Error: ({}) {}'.format(
                            inner_error.__class__.__name__, str(inner_error))
                    except Exception:
                        status = '500 Error: Cannot describe error'
                finally:
                    if isinstance(error, TLSSetupException):
                        # This code branch is inside None check for self.transport
                        # so there shouldn't be a None self.transport but pytype
                        # still complains, so silence that error.
                        self.transport.close()  # pytype: disable=attribute-error
                        self.connection_lost(error)
                    else:
                        # The value of status is being set with ex-except and it
                        # shouldn't be None, but pytype isn't able to infer that
                        # so ignore the error related to wrong argument types.
                        await self.push(status)  # pytype: disable=wrong-arg-types
async def check_helo_needed(self, helo: str = "HELO") -> bool:
"""
Check if HELO/EHLO is needed.
:param helo: The actual string of HELO/EHLO
:return: True if HELO/EHLO is needed
"""
assert self.session is not None
if not self.session.host_name:
await self.push(f'503 Error: send {helo} first')
return True
return False
async def check_auth_needed(self, caller_method: str) -> bool:
"""
Check if AUTH is needed.
:param caller_method: The SMTP method needing a check (for logging)
:return: True if AUTH is needed
"""
assert self.session is not None
if self._auth_required and not self.session.authenticated:
log.info(f'{caller_method}: Authentication required')
await self.push('530 5.7.0 Authentication required')
return True
return False
# SMTP and ESMTP commands
@syntax('HELO hostname')
async def smtp_HELO(self, hostname: str):
if not hostname:
await self.push('501 Syntax: HELO hostname')
return
self._set_rset_state()
assert self.session is not None
self.session.extended_smtp = False
status = await self._call_handler_hook('HELO', hostname)
if status is MISSING:
self.session.host_name = hostname
status = '250 {}'.format(self.hostname)
await self.push(status)
    @syntax('EHLO hostname')
    async def smtp_EHLO(self, hostname: str):
        """Handle EHLO: advertise supported extensions, then run the EHLO hook
        (old 4-arg or new 5-arg flavor) and push the resulting responses."""
        if not hostname:
            await self.push('501 Syntax: EHLO hostname')
            return
        response = ['250-' + self.hostname, ]
        self._set_rset_state()
        assert self.session is not None
        self.session.extended_smtp = True
        if self.data_size_limit:
            response.append(f'250-SIZE {self.data_size_limit}')
            # Allow for the extra length of the SIZE= MAIL parameter.
            self.command_size_limits['MAIL'] += 26
        if not self._decode_data:
            response.append('250-8BITMIME')
        if self.enable_SMTPUTF8:
            response.append('250-SMTPUTF8')
            # Allow for the extra length of the SMTPUTF8 MAIL parameter.
            self.command_size_limits['MAIL'] += 10
        if self.tls_context and not self._tls_protocol:
            response.append('250-STARTTLS')
        if not self._auth_require_tls or self._tls_protocol:
            response.append(
                "250-AUTH " + " ".join(sorted(self._auth_methods.keys()))
            )
        if hasattr(self, 'ehlo_hook'):
            warn('Use handler.handle_EHLO() instead of .ehlo_hook()',
                 DeprecationWarning)
            await self.ehlo_hook()
        if self._ehlo_hook_ver is None:
            self.session.host_name = hostname
            response.append('250 HELP')
        elif self._ehlo_hook_ver == "old":
            # Old behavior: Send all responses first...
            for r in response:
                await self.push(r)
            # ... then send the response from the hook.
            response = [await self._call_handler_hook("EHLO", hostname)]
            # (The hook might internally send its own responses.)
        elif self._ehlo_hook_ver == "new":  # pragma: nobranch
            # New behavior: hand over list of responses so far to the hook, and
            # REPLACE existing list of responses with what the hook returns.
            # We will handle the push()ing
            response.append('250 HELP')
            response = await self._call_handler_hook("EHLO", hostname, response)
        for r in response:
            await self.push(r)
@syntax('NOOP [ignored]')
async def smtp_NOOP(self, arg: str):
status = await self._call_handler_hook('NOOP', arg)
await self.push('250 OK' if status is MISSING else status)
@syntax('QUIT')
async def smtp_QUIT(self, arg: str):
if arg:
await self.push('501 Syntax: QUIT')
else:
status = await self._call_handler_hook('QUIT')
await self.push('221 Bye' if status is MISSING else status)
assert self._handler_coroutine is not None
self._handler_coroutine.cancel()
assert self.transport is not None
self.transport.close()
    @syntax('STARTTLS', when='tls_context')
    async def smtp_STARTTLS(self, arg: str):
        """Handle STARTTLS: wrap the current transport in an SSLProtocol and
        wait for the TLS handshake; raises TLSSetupException on failure."""
        if arg:
            await self.push('501 Syntax: STARTTLS')
            return
        if not self.tls_context:
            await self.push('454 TLS not available')
            return
        await self.push('220 Ready to start TLS')
        # Create a waiter Future to wait for SSL handshake to complete
        waiter = self.loop.create_future()
        # Create SSL layer.
        # noinspection PyTypeChecker
        self._tls_protocol = sslproto.SSLProtocol(
            self.loop,
            self,
            self.tls_context,
            waiter,
            server_side=True)
        # Reconfigure transport layer. Keep a reference to the original
        # transport so that we can close it explicitly when the connection is
        # lost.
        self._original_transport = self.transport
        assert self._original_transport is not None
        self._original_transport.set_protocol(self._tls_protocol)
        # Reconfigure the protocol layer. Why is the app transport a protected
        # property, if it MUST be used externally?
        self.transport = self._tls_protocol._app_transport
        self._tls_protocol.connection_made(self._original_transport)
        # wait until handshake complete
        try:
            await waiter
        except asyncio.CancelledError:
            raise
        except Exception as error:
            raise TLSSetupException() from error
    @syntax("AUTH <mechanism>")
    async def smtp_AUTH(self, arg: str) -> None:
        """Handle AUTH: verify preconditions (EHLO, TLS, not yet authed), then
        dispatch to the mechanism implementation and report the outcome."""
        if await self.check_helo_needed("EHLO"):
            return
        assert self.session is not None
        if not self.session.extended_smtp:
            await self.push("500 Error: command 'AUTH' not recognized")
            return
        elif self._auth_require_tls and not self._tls_protocol:
            await self.push("538 5.7.11 Encryption required for requested "
                            "authentication mechanism")
            return
        elif self.session.authenticated:
            await self.push('503 Already authenticated')
            return
        elif not arg:
            await self.push('501 Not enough value')
            return
        args = arg.split()
        if len(args) > 2:
            await self.push('501 Too many values')
            return
        mechanism = args[0]
        if mechanism not in self._auth_methods:
            await self.push('504 5.5.4 Unrecognized authentication type')
            return
        CODE_SUCCESS = "235 2.7.0 Authentication successful"
        CODE_INVALID = "535 5.7.8 Authentication credentials invalid"
        status = await self._call_handler_hook('AUTH', args)
        if status is MISSING:
            auth_method = self._auth_methods[mechanism]
            log.debug(
                "Using %s auth_ hook for %r",
                "builtin" if auth_method.is_builtin else "handler",
                mechanism
            )
            # Pass 'self' to method so external methods can leverage this
            # class's helper methods such as push()
            auth_result = await auth_method.method(self, args)
            log.debug("auth_%s returned %r", mechanism, auth_result)
            # New system using `authenticator` and AuthResult
            if isinstance(auth_result, AuthResult):
                if auth_result.success:
                    self.session.authenticated = True
                    _auth_data = auth_result.auth_data
                    self.session.auth_data = _auth_data
                    # Custom mechanisms might not implement the "login" attribute, and
                    # that's okay.
                    self.session.login_data = getattr(_auth_data, "login", None)
                    status = auth_result.message or CODE_SUCCESS
                else:
                    if auth_result.handled:
                        # Mechanism already sent its own response; stay silent.
                        status = None
                    elif auth_result.message:
                        status = auth_result.message
                    else:
                        status = CODE_INVALID
            # Old system using `auth_callback` and _TriState
            elif auth_result is None:
                # None means there's an error already handled by method and
                # we don't need to do anything more
                status = None
            elif auth_result is MISSING or auth_result is False:
                # MISSING means no error in AUTH process, but credentials
                # is rejected / not valid
                status = CODE_INVALID
            else:
                self.session.login_data = auth_result
                status = CODE_SUCCESS
        if status is not None:  # pragma: no branch
            await self.push(status)
    async def challenge_auth(
            self,
            challenge: Union[str, bytes],
            encode_to_b64: bool = True,
            log_client_response: bool = False,
    ) -> Union[_Missing, bytes]:
        """
        Send challenge during authentication. "334 " will be prefixed, so do NOT
        put "334 " at start of server_message.

        :param challenge: Challenge to send to client. If str, will be utf8-encoded.
        :param encode_to_b64: If true, then perform Base64 encoding on challenge
        :param log_client_response: Perform logging of client's response.
            WARNING: Might cause leak of sensitive information! Do not turn on
            unless _absolutely_ necessary!
        :return: Base64-decoded response from the client, or MISSING if the
            client aborted with "*" or sent a non-base64 reply (in both of
            those cases an error reply has already been pushed).
        """
        challenge = (
            challenge.encode() if isinstance(challenge, str) else challenge
        )
        assert isinstance(challenge, bytes)
        # Trailing space is MANDATORY even if challenge is empty.
        # See:
        #  - https://tools.ietf.org/html/rfc4954#page-4 ¶ 5
        #  - https://tools.ietf.org/html/rfc4954#page-13 "continue-req"
        challenge = b"334 " + (b64encode(challenge) if encode_to_b64 else challenge)
        assert self.session is not None
        log.debug("%r << challenge: %r", self.session.peer, challenge)
        await self.push(challenge)
        line = await self._reader.readline()  # pytype: disable=attribute-error
        if log_client_response:
            # Deliberately loud: credentials may end up in the debug log.
            warn("AUTH interaction logging is enabled!")
            warn("Sensitive information might be leaked!")
            log.debug("%r >> %r", self.session.peer, line)
        blob: bytes = line.strip()
        # '*' handling in accordance with RFC4954
        if blob == b"*":
            log.warning("%r aborted AUTH with '*'", self.session.peer)
            await self.push("501 5.7.0 Auth aborted")
            return MISSING
        try:
            decoded_blob = b64decode(blob, validate=True)
        except binascii.Error:
            log.debug("%r can't decode base64: %s", self.session.peer, blob)
            await self.push("501 5.5.2 Can't decode base64")
            return MISSING
        return decoded_blob
_334_PREFIX = re.compile(r"^334 ")
async def _auth_interact(
self,
server_message: str
) -> Union[_Missing, bytes]: # pragma: nocover
warn(
"_auth_interact will be deprecated in version 2.0. "
"Please use challenge_auth() instead.",
DeprecationWarning
)
return await self.challenge_auth(
challenge=self._334_PREFIX.sub("", server_message),
encode_to_b64=False,
)
def _authenticate(self, mechanism: str, auth_data: Any) -> AuthResult:
if self._authenticator is not None:
# self.envelope is likely still empty, but we'll pass it anyways to
# make the invocation similar to the one in _call_handler_hook
assert self.session is not None
assert self.envelope is not None
return self._authenticator(
self, self.session, self.envelope, mechanism, auth_data
)
else:
assert self._auth_callback is not None
assert isinstance(auth_data, LoginPassword)
if self._auth_callback(mechanism, *auth_data):
return AuthResult(success=True, handled=True, auth_data=auth_data)
else:
return AuthResult(success=False, handled=False)
    # IMPORTANT NOTES FOR THE auth_* METHODS
    # ======================================
    # Please note that there are two return-value systems, described in
    # items 2 and 3 below.
    #
    # 1. For internal methods, due to how they are called, we must ignore the first arg
    # 2. (OLD SYSTEM) All auth_* methods can return one of three values:
    #    - None: An error happened and was handled;
    #      smtp_AUTH should do nothing more
    #    - MISSING or False: Authentication failed, but not because of error
    #    - [Any]: Authentication succeeded and this is the 'identity' of
    #      the SMTP user
    #      - 'identity' is not always username, depending on the auth mecha-
    #        nism. Might be a session key, a one-time user ID, or any kind of
    #        object, actually.
    # 3. (NEW SYSTEM) All auth_* methods must return an AuthResult object.
    #    For explanation on the object's attributes,
    #    see the AuthResult class definition.
    # 4. Auth credentials checking is performed in the auth_* methods because
    #    more advanced auth mechanisms might not return a login+password pair
    #    (see items 2 and 3 above)
async def auth_PLAIN(self, _, args: List[str]) -> AuthResult:
login_and_password: _TriStateType
if len(args) == 1:
login_and_password = await self.challenge_auth("")
if login_and_password is MISSING:
return AuthResult(success=False)
else:
try:
login_and_password = b64decode(args[1].encode(), validate=True)
except Exception:
await self.push("501 5.5.2 Can't decode base64")
return AuthResult(success=False, handled=True)
try:
# login data is "{authz_id}\x00{login_id}\x00{password}"
# authz_id can be null, and currently ignored
# See https://tools.ietf.org/html/rfc4616#page-3
_, login, password = login_and_password.split(b"\x00") # noqa: E501
except ValueError: # not enough args
await self.push("501 5.5.2 Can't split auth value")
return AuthResult(success=False, handled=True)
# Verify login data
assert login is not None
assert password is not None
return self._authenticate("PLAIN", LoginPassword(login, password))
async def auth_LOGIN(self, _, args: List[str]) -> AuthResult:
login: _TriStateType
if len(args) == 1:
# Client sent only "AUTH LOGIN"
login = await self.challenge_auth(self.AuthLoginUsernameChallenge)
if login is MISSING:
return AuthResult(success=False)
else:
# Client sent "AUTH LOGIN <b64-encoded-username>"
try:
login = b64decode(args[1].encode(), validate=True)
except Exception:
await self.push("501 5.5.2 Can't decode base64")
return AuthResult(success=False, handled=True)
assert login is not None
password: _TriStateType
password = await self.challenge_auth(self.AuthLoginPasswordChallenge)
if password is MISSING:
return AuthResult(success=False)
assert password is not None
return self._authenticate("LOGIN", LoginPassword(login, password))
def _strip_command_keyword(self, keyword: str, arg: str) -> Optional[str]:
keylen = len(keyword)
if arg[:keylen].upper() == keyword:
return arg[keylen:].strip()
return None
def _getaddr(self, arg: str) -> Tuple[Optional[str], Optional[str]]:
"""
Try to parse address given in SMTP command.
Returns address=None if arg can't be parsed properly (get_angle_addr /
get_addr_spec raised HeaderParseError)
"""
class AddrSpec(Protocol):
@property
def addr_spec(self) -> str:
...
if not arg:
return '', ''
address: AddrSpec
try:
if arg.lstrip().startswith('<'):
address, rest = get_angle_addr(arg)
else:
address, rest = get_addr_spec(arg)
except HeaderParseError:
return None, None
addr = address.addr_spec
localpart, atsign, domainpart = addr.rpartition("@")
if self.local_part_limit and len(localpart) > self.local_part_limit:
return None, None
return addr, rest
def _getparams(
self, params: Sequence[str]
) -> Optional[Dict[str, Union[str, bool]]]:
# Return params as dictionary. Return None if not all parameters
# appear to be syntactically valid according to RFC 1869.
result: Dict[str, Union[str, bool]] = {}
for param in params:
param, eq, value = param.partition('=')
if not param.isalnum() or eq and not value:
return None
result[param] = value if eq else True
return result
# noinspection PyUnresolvedReferences
def _syntax_available(self, method: Callable) -> bool:
if not hasattr(method, '__smtp_syntax__'):
return False
if method.__smtp_syntax_when__: # type: ignore[attr-defined]
return bool(getattr(self, method.__smtp_syntax_when__)) # type: ignore[attr-defined]
return True
@syntax('HELP [command]')
async def smtp_HELP(self, arg: str) -> None:
if await self.check_auth_needed("HELP"):
return
code = 250
if arg:
method = self._smtp_methods.get(arg.upper())
if method and self._syntax_available(method):
help_str = method.__smtp_syntax__
assert self.session is not None
if (self.session.extended_smtp
and method.__smtp_syntax_extended__):
help_str += method.__smtp_syntax_extended__
await self.push('250 Syntax: ' + help_str)
return
code = 501
commands = []
for name, method in self._smtp_methods.items():
if self._syntax_available(method):
commands.append(name)
commands.sort()
await self.push(
'{} Supported commands: {}'.format(code, ' '.join(commands)))
@syntax('VRFY <address>')
async def smtp_VRFY(self, arg: str) -> None:
if await self.check_auth_needed("VRFY"):
return
if arg:
address, params = self._getaddr(arg)
if address is None:
await self.push('502 Could not VRFY ' + arg)
else:
status = await self._call_handler_hook('VRFY', address)
await self.push(
'252 Cannot VRFY user, but will accept message '
'and attempt delivery'
if status is MISSING else status)
else:
await self.push('501 Syntax: VRFY <address>')
    @syntax('MAIL FROM: <address>', extended=' [SP <mail-parameters>]')
    async def smtp_MAIL(self, arg: Optional[str]) -> None:
        """Handle MAIL FROM: validate syntax and ESMTP parameters, then
        record the sender on the envelope (unless the handler's MAIL hook
        overrides the status)."""
        if await self.check_helo_needed():
            return
        if await self.check_auth_needed("MAIL"):
            return
        syntaxerr = '501 Syntax: MAIL FROM: <address>'
        assert self.session is not None
        if self.session.extended_smtp:
            syntaxerr += ' [SP <mail-parameters>]'
        if arg is None:
            await self.push(syntaxerr)
            return
        arg = self._strip_command_keyword('FROM:', arg)
        if arg is None:
            await self.push(syntaxerr)
            return
        address, addrparams = self._getaddr(arg)
        if address is None:
            await self.push("553 5.1.3 Error: malformed address")
            return
        if not address:
            await self.push(syntaxerr)
            return
        # Mail parameters are an ESMTP extension; reject them on plain HELO.
        if not self.session.extended_smtp and addrparams:
            await self.push(syntaxerr)
            return
        assert self.envelope is not None
        # Only one MAIL FROM per transaction.
        if self.envelope.mail_from:
            await self.push('503 Error: nested MAIL command')
            return
        assert addrparams is not None
        mail_options = addrparams.upper().split()
        params = self._getparams(mail_options)
        if params is None:
            await self.push(syntaxerr)
            return
        if not self._decode_data:
            # BODY is only meaningful when content is kept as raw bytes.
            body = params.pop('BODY', '7BIT')
            if body not in ['7BIT', '8BITMIME']:
                await self.push(
                    '501 Error: BODY can only be one of 7BIT, 8BITMIME')
                return
        # SMTPUTF8 is a valueless flag; "SMTPUTF8=x" parses to a str and is
        # rejected below.
        smtputf8 = params.pop('SMTPUTF8', False)
        if not isinstance(smtputf8, bool):
            await self.push('501 Error: SMTPUTF8 takes no arguments')
            return
        if smtputf8 and not self.enable_SMTPUTF8:
            await self.push('501 Error: SMTPUTF8 disabled')
            return
        self.envelope.smtp_utf8 = smtputf8
        size = params.pop('SIZE', None)
        if size:
            if isinstance(size, bool) or not size.isdigit():
                await self.push(syntaxerr)
                return
            elif self.data_size_limit and int(size) > self.data_size_limit:
                await self.push(
                    '552 Error: message size exceeds fixed maximum message '
                    'size')
                return
        # Anything left over is an unrecognized MAIL parameter.
        if len(params) > 0:
            await self.push(
                '555 MAIL FROM parameters not recognized or not implemented')
            return
        status = await self._call_handler_hook('MAIL', address, mail_options)
        if status is MISSING:
            self.envelope.mail_from = address
            self.envelope.mail_options.extend(mail_options)
            status = '250 OK'
        log.info('%r sender: %s', self.session.peer, address)
        await self.push(status)
    @syntax('RCPT TO: <address>', extended=' [SP <mail-parameters>]')
    async def smtp_RCPT(self, arg: Optional[str]) -> None:
        """Handle RCPT TO: validate syntax, then append the recipient to the
        envelope (unless the handler's RCPT hook overrides the status)."""
        if await self.check_helo_needed():
            return
        if await self.check_auth_needed("RCPT"):
            return
        assert self.envelope is not None
        # RCPT is only valid after a successful MAIL FROM.
        if not self.envelope.mail_from:
            await self.push("503 Error: need MAIL command")
            return
        syntaxerr = '501 Syntax: RCPT TO: <address>'
        assert self.session is not None
        if self.session.extended_smtp:
            syntaxerr += ' [SP <mail-parameters>]'
        if arg is None:
            await self.push(syntaxerr)
            return
        arg = self._strip_command_keyword('TO:', arg)
        if arg is None:
            await self.push(syntaxerr)
            return
        address, params = self._getaddr(arg)
        if address is None:
            await self.push("553 5.1.3 Error: malformed address")
            return
        if not address:
            await self.push(syntaxerr)
            return
        # RCPT parameters are an ESMTP extension; reject them on plain HELO.
        if not self.session.extended_smtp and params:
            await self.push(syntaxerr)
            return
        assert params is not None
        rcpt_options = params.upper().split()
        params_dict = self._getparams(rcpt_options)
        if params_dict is None:
            await self.push(syntaxerr)
            return
        # XXX currently there are no options we recognize.
        if len(params_dict) > 0:
            await self.push(
                '555 RCPT TO parameters not recognized or not implemented'
            )
            return
        status = await self._call_handler_hook('RCPT', address, rcpt_options)
        if status is MISSING:
            self.envelope.rcpt_tos.append(address)
            self.envelope.rcpt_options.extend(rcpt_options)
            status = '250 OK'
        log.info('%r recip: %s', self.session.peer, address)
        await self.push(status)
@syntax('RSET')
async def smtp_RSET(self, arg: str):
if arg:
await self.push('501 Syntax: RSET')
return
self._set_rset_state()
if hasattr(self, 'rset_hook'):
warn('Use handler.handle_RSET() instead of .rset_hook()',
DeprecationWarning)
await self.rset_hook()
status = await self._call_handler_hook('RSET')
await self.push('250 OK' if status is MISSING else status)
@syntax('DATA')
async def smtp_DATA(self, arg: str) -> None:
if await self.check_helo_needed():
return
if await self.check_auth_needed("DATA"):
return
assert self.envelope is not None
if not self.envelope.rcpt_tos:
await self.push('503 Error: need RCPT command')
return
if arg:
await self.push('501 Syntax: DATA')
return
await self.push('354 End data with <CR><LF>.<CR><LF>')
data: List[bytearray] = []
num_bytes: int = 0
limit: Optional[int] = self.data_size_limit
line_fragments: List[bytes] = []
state: _DataState = _DataState.NOMINAL
while self.transport is not None: # pragma: nobranch
# Since eof_received cancels this coroutine,
# readuntil() can never raise asyncio.IncompleteReadError.
try:
line: bytes = await self._reader.readuntil()
log.debug('DATA readline: %s', line)
assert line.endswith(b'\n')
except asyncio.CancelledError:
# The connection got reset during the DATA command.
log.info('Connection lost during DATA')
self._writer.close()
raise
except asyncio.LimitOverrunError as e:
# The line exceeds StreamReader's "stream limit".
# Delay SMTP Status Code sending until data receive is complete
# This seems to be implied in RFC 5321 § 4.2.5
if state == _DataState.NOMINAL:
# Transition to TOO_LONG only if we haven't gone TOO_MUCH yet
state = _DataState.TOO_LONG
# Discard data immediately to prevent memory pressure
data *= 0
# Drain the stream anyways
line = await self._reader.read(e.consumed)
assert not line.endswith(b'\n')
# A lone dot in a line signals the end of DATA.
if not line_fragments and line == b'.\r\n':
break
num_bytes += len(line)
if state == _DataState.NOMINAL and limit and num_bytes > limit:
# Delay SMTP Status Code sending until data receive is complete
# This seems to be implied in RFC 5321 § 4.2.5
state = _DataState.TOO_MUCH
# Discard data immediately to prevent memory pressure
data *= 0
line_fragments.append(line)
if line.endswith(b'\n'):
# Record data only if state is "NOMINAL"
if state == _DataState.NOMINAL:
line = EMPTY_BARR.join(line_fragments)
if len(line) > self.line_length_limit:
# Theoretically we shouldn't reach this place. But it's always
# good to practice DEFENSIVE coding.
state = _DataState.TOO_LONG
# Discard data immediately to prevent memory pressure
data *= 0
else:
data.append(EMPTY_BARR.join(line_fragments))
line_fragments *= 0
# Day of reckoning! Let's take care of those out-of-nominal situations
if state != _DataState.NOMINAL:
if state == _DataState.TOO_LONG:
await self.push("500 Line too long (see RFC5321 4.5.3.1.6)")
elif state == _DataState.TOO_MUCH: # pragma: nobranch
await self.push('552 Error: Too much mail data')
self._set_post_data_state()
return
# If unfinished_line is non-empty, then the connection was closed.
assert not line_fragments
# Remove extraneous carriage returns and de-transparency
# according to RFC 5321, Section 4.5.2.
for text in data:
if text.startswith(b'.'):
del text[0]
original_content: bytes = EMPTYBYTES.join(data)
# Discard data immediately to prevent memory pressure
data *= 0
content: Union[str, bytes]
if self._decode_data:
if self.enable_SMTPUTF8:
content = original_content.decode('utf-8', errors='surrogateescape')
else:
try:
content = original_content.decode('ascii', errors='strict')
except UnicodeDecodeError:
# This happens if enable_smtputf8 is false, meaning that
# the server explicitly does not want to accept non-ascii,
# but the client ignores that and sends non-ascii anyway.
await self.push('500 Error: strict ASCII mode')
return
else:
content = original_content
self.envelope.content = content
self.envelope.original_content = original_content
# Call the new API first if it's implemented.
if "DATA" in self._handle_hooks:
status = await self._call_handler_hook('DATA')
else:
# Backward compatibility.
status = MISSING
if hasattr(self.event_handler, 'process_message'):
warn('Use handler.handle_DATA() instead of .process_message()',
DeprecationWarning)
assert self.session is not None
args = (self.session.peer, self.envelope.mail_from,
self.envelope.rcpt_tos, self.envelope.content)
if asyncio.iscoroutinefunction(
self.event_handler.process_message):
status = await self.event_handler.process_message(*args)
else:
status = self.event_handler.process_message(*args)
# The deprecated API can return None which means, return the
# default status. Don't worry about coverage for this case as
# it's a deprecated API that will go away after 1.0.
if status is None: # pragma: nocover
status = MISSING
self._set_post_data_state()
await self.push('250 OK' if status is MISSING else status)
    # Commands that have not been implemented.
    async def smtp_EXPN(self, arg: str):
        """EXPN (mailing-list expansion) is deliberately not implemented."""
        await self.push('502 EXPN not implemented')
|
PYSEC-2024-221
|
arches/app/models/concept.py
|
@@ -32,6 +32,8 @@
from django.utils.translation import ugettext as _
from django.utils.translation import get_language
from django.db import IntegrityError
+from psycopg2.extensions import AsIs
+
import logging
@@ -505,13 +507,12 @@ def get_child_edges(
except:
return []
- languageid = get_language() if languageid is None else languageid
+ # this interpolation is safe because `relationtypes` is hardcoded in all calls, and not accessible via the API
relationtypes = " or ".join(["r.relationtype = '%s'" % (relationtype) for relationtype in relationtypes])
- depth_limit = "and depth < %s" % depth_limit if depth_limit else ""
- child_valuetypes = ("','").join(
- child_valuetypes if child_valuetypes else models.DValueType.objects.filter(category="label").values_list("valuetype", flat=True)
- )
- limit_clause = " limit %s offset %s" % (limit, offset) if offset is not None else ""
+ offset_clause = " limit %(limit)s offset %(offset)s" if offset else ""
+ depth_clause = " and depth < %(depth_limit)s" if depth_limit else ""
+
+ cursor = connection.cursor()
if order_hierarchically:
sql = """
@@ -525,9 +526,9 @@ def get_child_edges(
WHERE conceptid=r.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
- CASE WHEN languageid = '{languageid}' THEN 10
- WHEN languageid like '{short_languageid}%' THEN 5
- WHEN languageid like '{default_languageid}%' THEN 2
+ CASE WHEN languageid = %(languageid)s THEN 10
+ WHEN languageid like %(short_languageid)s THEN 5
+ WHEN languageid like %(default_languageid)s THEN 2
ELSE 0
END
) desc limit 1
@@ -547,8 +548,8 @@ def get_child_edges(
limit 1
) as collector
FROM relations r
- WHERE r.conceptidfrom = '{conceptid}'
- and ({relationtypes})
+ WHERE r.conceptidfrom = %(conceptid)s
+ and (%(relationtypes)s)
ORDER BY sortorder, valuesto
)
UNION
@@ -559,9 +560,9 @@ def get_child_edges(
WHERE conceptid=r.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
- CASE WHEN languageid = '{languageid}' THEN 10
- WHEN languageid like '{short_languageid}%' THEN 5
- WHEN languageid like '{default_languageid}%' THEN 2
+ CASE WHEN languageid = %(languageid)s THEN 10
+ WHEN languageid like %(short_languageid)s THEN 5
+ WHEN languageid like %(default_languageid)s THEN 2
ELSE 0
END
) desc limit 1
@@ -582,7 +583,7 @@ def get_child_edges(
) as collector
FROM relations r
JOIN ordered_relationships b ON(b.conceptidto = r.conceptidfrom)
- WHERE ({relationtypes})
+ WHERE (%(relationtypes)s)
ORDER BY sortorder, valuesto
)
),
@@ -593,17 +594,17 @@ def get_child_edges(
r.collector,
1 AS depth ---|NonRecursive Part
FROM ordered_relationships r
- WHERE r.conceptidfrom = '{conceptid}'
- and ({relationtypes})
+ WHERE r.conceptidfrom = %(conceptid)s
+ and (%(relationtypes)s)
UNION
SELECT r.conceptidfrom, r.conceptidto,
row || '-' || to_char(row_number() OVER (), 'fm000000'),
r.collector,
depth+1 ---|RecursivePart
FROM ordered_relationships r
JOIN children b ON(b.conceptidto = r.conceptidfrom)
- WHERE ({relationtypes})
- {depth_limit}
+ WHERE (%(relationtypes)s)
+ {depth_clause}
)
{subquery}
@@ -614,70 +615,73 @@ def get_child_edges(
FROM (
SELECT *
FROM values
- WHERE conceptid={recursive_table}.conceptidto
+ WHERE conceptid=%(recursive_table)s.conceptidto
AND valuetype in ('prefLabel')
ORDER BY (
- CASE WHEN languageid = '{languageid}' THEN 10
- WHEN languageid like '{short_languageid}%' THEN 5
- WHEN languageid like '{default_languageid}%' THEN 2
+ CASE WHEN languageid = %(languageid)s THEN 10
+ WHEN languageid like %(short_languageid)s THEN 5
+ WHEN languageid like %(default_languageid)s THEN 2
ELSE 0
END
) desc limit 1
) d
) as valueto,
depth, collector, count(*) OVER() AS full_count
- FROM {recursive_table} order by row {limit_clause};
-
+ FROM %(recursive_table)s order by row {offset_clause};
"""
- subquery = (
- """, results as (
- SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
- FROM children c
- JOIN values ON(values.conceptid = c.conceptidto)
- WHERE LOWER(values.value) like '%%%s%%'
- AND values.valuetype in ('prefLabel')
- UNION
- SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
- FROM children c
- JOIN results r on (r.conceptidfrom=c.conceptidto)
- )"""
- % query.lower()
- if query is not None
- else ""
- )
+ if query:
+ subquery = """
+ , results as (
+ SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
+ FROM children c
+ JOIN values ON(values.conceptid = c.conceptidto)
+ WHERE LOWER(values.value) like %(query)s
+ AND values.valuetype in ('prefLabel')
+ UNION
+ SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
+ FROM children c
+ JOIN results r on (r.conceptidfrom=c.conceptidto)
+ )
+ """
+ else:
+ subquery = ""
+
+ sql = sql.format(subquery=subquery, offset_clause=offset_clause, depth_clause=depth_clause)
recursive_table = "results" if query else "children"
+ languageid = get_language() if languageid is None else languageid
- sql = sql.format(
- conceptid=conceptid,
- relationtypes=relationtypes,
- child_valuetypes=child_valuetypes,
- parent_valuetype=parent_valuetype,
- depth_limit=depth_limit,
- limit_clause=limit_clause,
- subquery=subquery,
- recursive_table=recursive_table,
- languageid=languageid,
- short_languageid=languageid.split("-")[0],
- default_languageid=settings.LANGUAGE_CODE,
+ cursor.execute(
+ sql,
+ {
+ "conceptid": conceptid,
+ "relationtypes": AsIs(relationtypes),
+ "depth_limit": depth_limit,
+ "limit": limit,
+ "offset": offset,
+ "query": "%" + query.lower() + "%",
+ "recursive_table": AsIs(recursive_table),
+ "languageid": languageid,
+ "short_languageid": languageid.split("-")[0] + "%",
+ "default_languageid": settings.LANGUAGE_CODE + "%",
+ },
)
-
else:
sql = """
WITH RECURSIVE
children AS (
SELECT r.conceptidfrom, r.conceptidto, r.relationtype, 1 AS depth
FROM relations r
- WHERE r.conceptidfrom = '{conceptid}'
- AND ({relationtypes})
+ WHERE r.conceptidfrom = %(conceptid)s
+ AND (%(relationtypes)s)
UNION
SELECT r.conceptidfrom, r.conceptidto, r.relationtype, depth+1
FROM relations r
JOIN children c ON(c.conceptidto = r.conceptidfrom)
- WHERE ({relationtypes})
- {depth_limit}
+ WHERE (%(relationtypes)s)
+ {depth_clause}
),
results AS (
SELECT
@@ -692,14 +696,15 @@ def get_child_edges(
JOIN children c ON(c.conceptidto = valueto.conceptid)
JOIN values valuefrom ON(c.conceptidfrom = valuefrom.conceptid)
JOIN d_value_types dtypesfrom ON(dtypesfrom.valuetype = valuefrom.valuetype)
- WHERE valueto.valuetype in ('{child_valuetypes}')
- AND valuefrom.valuetype in ('{child_valuetypes}')
+ WHERE valueto.valuetype in (%(child_valuetypes)s)
+ AND valuefrom.valuetype in (%(child_valuetypes)s)
)
- SELECT distinct {columns}
- FROM results {limit_clause}
-
+ SELECT distinct %(columns)s
+ FROM results {offset_clause}
"""
+ sql = sql.format(offset_clause=offset_clause, depth_clause=depth_clause)
+
if not columns:
columns = """
conceptidfrom::text, conceptidto::text,
@@ -710,19 +715,24 @@ def get_child_edges(
categoryfrom, categoryto
"""
- sql = sql.format(
- conceptid=conceptid,
- relationtypes=relationtypes,
- child_valuetypes=child_valuetypes,
- columns=columns,
- depth_limit=depth_limit,
- limit_clause=limit_clause,
+ cursor.execute(
+ sql,
+ {
+ "conceptid": conceptid,
+ "relationtypes": AsIs(relationtypes),
+ "child_valuetypes": ("','").join(
+ child_valuetypes
+ if child_valuetypes
+ else models.DValueType.objects.filter(category="label").values_list("valuetype", flat=True)
+ ),
+ "columns": AsIs(columns),
+ "depth_limit": depth_limit,
+ "limit": limit,
+ "offset": offset,
+ },
)
- cursor = connection.cursor()
- cursor.execute(sql)
- rows = cursor.fetchall()
- return rows
+ return cursor.fetchall()
def traverse(self, func, direction="down", scope=None, **kwargs):
"""
@@ -1176,30 +1186,31 @@ def get_e55_domain(self, conceptid):
"""
cursor = connection.cursor()
-
- sql = """
- WITH RECURSIVE children AS (
- SELECT d.conceptidfrom, d.conceptidto, c2.value, c2.valueid as valueid, c.value as valueto, c.valueid as valueidto, c.valuetype as vtype, 1 AS depth, array[d.conceptidto] AS conceptpath, array[c.valueid] AS idpath ---|NonRecursive Part
- FROM relations d
- JOIN values c ON(c.conceptid = d.conceptidto)
- JOIN values c2 ON(c2.conceptid = d.conceptidfrom)
- WHERE d.conceptidfrom = '{0}'
- and c2.valuetype = 'prefLabel'
- and c.valuetype in ('prefLabel', 'sortorder', 'collector')
- and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
- UNION
- SELECT d.conceptidfrom, d.conceptidto, v2.value, v2.valueid as valueid, v.value as valueto, v.valueid as valueidto, v.valuetype as vtype, depth+1, (conceptpath || d.conceptidto), (idpath || v.valueid) ---|RecursivePart
- FROM relations d
- JOIN children b ON(b.conceptidto = d.conceptidfrom)
- JOIN values v ON(v.conceptid = d.conceptidto)
- JOIN values v2 ON(v2.conceptid = d.conceptidfrom)
- WHERE v2.valuetype = 'prefLabel'
- and v.valuetype in ('prefLabel','sortorder', 'collector')
- and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
- ) SELECT conceptidfrom::text, conceptidto::text, value, valueid::text, valueto, valueidto::text, depth, idpath::text, conceptpath::text, vtype FROM children ORDER BY depth, conceptpath;
- """.format(
- conceptid
+ cursor.execute(
+ """
+ WITH RECURSIVE children AS (
+ SELECT d.conceptidfrom, d.conceptidto, c2.value, c2.valueid as valueid, c.value as valueto, c.valueid as valueidto, c.valuetype as vtype, 1 AS depth, array[d.conceptidto] AS conceptpath, array[c.valueid] AS idpath ---|NonRecursive Part
+ FROM relations d
+ JOIN values c ON(c.conceptid = d.conceptidto)
+ JOIN values c2 ON(c2.conceptid = d.conceptidfrom)
+ WHERE d.conceptidfrom = %s
+ and c2.valuetype = 'prefLabel'
+ and c.valuetype in ('prefLabel', 'sortorder', 'collector')
+ and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
+ UNION
+ SELECT d.conceptidfrom, d.conceptidto, v2.value, v2.valueid as valueid, v.value as valueto, v.valueid as valueidto, v.valuetype as vtype, depth+1, (conceptpath || d.conceptidto), (idpath || v.valueid) ---|RecursivePart
+ FROM relations d
+ JOIN children b ON(b.conceptidto = d.conceptidfrom)
+ JOIN values v ON(v.conceptid = d.conceptidto)
+ JOIN values v2 ON(v2.conceptid = d.conceptidfrom)
+ WHERE v2.valuetype = 'prefLabel'
+ and v.valuetype in ('prefLabel','sortorder', 'collector')
+ and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
+ ) SELECT conceptidfrom::text, conceptidto::text, value, valueid::text, valueto, valueidto::text, depth, idpath::text, conceptpath::text, vtype FROM children ORDER BY depth, conceptpath;
+ """,
+ [conceptid],
)
+ rows = cursor.fetchall()
column_names = [
"conceptidfrom",
@@ -1213,8 +1224,6 @@ def get_e55_domain(self, conceptid):
"conceptpath",
"vtype",
]
- cursor.execute(sql)
- rows = cursor.fetchall()
class Val(object):
def __init__(self, conceptid):
|
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import uuid
import copy
from operator import itemgetter
from operator import methodcaller
from django.db import transaction, connection
from django.db.models import Q
from arches.app.models import models
from arches.app.models.system_settings import settings
from arches.app.search.search_engine_factory import SearchEngineInstance as se
from arches.app.search.elasticsearch_dsl_builder import Term, Query, Bool, Match, Terms
from arches.app.search.mappings import CONCEPTS_INDEX
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from django.utils.translation import ugettext as _
from django.utils.translation import get_language
from django.db import IntegrityError
import logging
logger = logging.getLogger(__name__)
# Well-known concept UUIDs treated specially elsewhere in the codebase.
# NOTE(review): presumably these are the built-in root concepts seeded by
# core migrations -- confirm against the migration fixtures.
CORE_CONCEPTS = (
    "00000000-0000-0000-0000-000000000001",
    "00000000-0000-0000-0000-000000000004",
    "00000000-0000-0000-0000-000000000005",
    "00000000-0000-0000-0000-000000000006",
)
class Concept(object):
def __init__(self, *args, **kwargs):
self.id = ""
self.nodetype = ""
self.legacyoid = ""
self.relationshiptype = ""
self.values = []
self.subconcepts = []
self.parentconcepts = []
self.relatedconcepts = []
self.hassubconcepts = False
if len(args) != 0:
if isinstance(args[0], str):
try:
uuid.UUID(args[0])
self.get(args[0])
except (ValueError):
self.load(JSONDeserializer().deserialize(args[0]))
elif isinstance(args[0], dict):
self.load(args[0])
elif isinstance(args[0], object):
self.load(args[0])
def __unicode__(self):
return ("%s - %s") % (self.get_preflabel().value, self.id)
    def __hash__(self):
        # Identity is determined solely by the concept id.
        return hash(self.id)

    def __eq__(self, x):
        # Equality likewise compares only the ids (via their hashes).
        return hash(self) == hash(x)

    def __ne__(self, x):
        return hash(self) != hash(x)
def load(self, value):
if isinstance(value, dict):
self.id = str(value["id"]) if "id" in value else ""
self.nodetype = value["nodetype"] if "nodetype" in value else ""
self.legacyoid = value["legacyoid"] if "legacyoid" in value else ""
self.relationshiptype = value["relationshiptype"] if "relationshiptype" in value else ""
if "values" in value:
for val in value["values"]:
self.addvalue(val)
if "subconcepts" in value:
for subconcept in value["subconcepts"]:
self.addsubconcept(subconcept)
if "parentconcepts" in value:
for parentconcept in value["parentconcepts"]:
self.addparent(parentconcept)
if "relatedconcepts" in value:
for relatedconcept in value["relatedconcepts"]:
self.addrelatedconcept(relatedconcept)
if isinstance(value, models.Concept):
self.id = str(value.pk)
self.nodetype = value.nodetype_id
self.legacyoid = value.legacyoid
def get(
    self,
    id="",
    legacyoid="",
    include_subconcepts=False,
    include_parentconcepts=False,
    include_relatedconcepts=False,
    exclude=[],
    include=[],
    depth_limit=None,
    up_depth_limit=None,
    lang=settings.LANGUAGE_CODE,
    semantic=True,
    pathway_filter=None,
    **kwargs,
):
    """
    Load this concept from the database (by ``id`` pk or by ``legacyoid``)
    and optionally hydrate its graph of sub/parent/related concepts.

    Arguments:
        include_subconcepts / include_parentconcepts / include_relatedconcepts --
            recurse down / up / sideways through relations
        exclude / include -- value-type *categories* to skip/keep when
            attaching ConceptValues; passing both non-empty raises
        depth_limit / up_depth_limit -- max recursion depth down / up
        semantic -- True follows "Semantic Relations"/"Properties" edges,
            False follows "member"/"hasCollection" (collection) edges
        pathway_filter -- optional Q object overriding the edge filter
        kwargs -- internal recursion state: nodetype, uplevel, downlevel,
            _cache (dict of already-built concepts, keyed by id, used to
            break cycles)

    Returns self (mutated in place).

    NOTE(review): the mutable defaults ``exclude=[]``/``include=[]`` are
    safe here because both names are rebound, never mutated.
    """
    if id != "":
        self.load(models.Concept.objects.get(pk=id))
    elif legacyoid != "":
        self.load(models.Concept.objects.get(legacyoid=legacyoid))
    # seed the cycle-breaking cache with a shallow copy of self
    _cache = kwargs.pop("_cache", {})
    _cache[self.id] = self.__class__(
        {"id": self.id, "nodetype": self.nodetype, "legacyoid": self.legacyoid, "relationshiptype": self.relationshiptype}
    )

    if semantic == True:
        pathway_filter = (
            pathway_filter
            if pathway_filter
            else Q(relationtype__category="Semantic Relations") | Q(relationtype__category="Properties")
        )
    else:
        pathway_filter = pathway_filter if pathway_filter else Q(relationtype="member") | Q(relationtype="hasCollection")

    if self.id != "":
        nodetype = kwargs.pop("nodetype", self.nodetype)
        uplevel = kwargs.pop("uplevel", 0)
        downlevel = kwargs.pop("downlevel", 0)
        depth_limit = depth_limit if depth_limit is None else int(depth_limit)
        up_depth_limit = up_depth_limit if up_depth_limit is None else int(up_depth_limit)

        if include is not None:
            if len(include) > 0 and len(exclude) > 0:
                raise Exception(_("Only include values for include or exclude, but not both"))
            # empty include means "all categories"
            include = (
                include if len(include) != 0 else models.DValueType.objects.distinct("category").values_list("category", flat=True)
            )
            include = set(include).difference(exclude)
            exclude = []

            if len(include) > 0:
                values = models.Value.objects.filter(concept=self.id)
                for value in values:
                    if value.valuetype.category in include:
                        self.values.append(ConceptValue(value))

        # cheap existence check ([0:1] slice) for any downward edge
        hassubconcepts = models.Relation.objects.filter(Q(conceptfrom=self.id), pathway_filter, ~Q(relationtype="related"))[0:1]
        if len(hassubconcepts) > 0:
            self.hassubconcepts = True

        if include_subconcepts:
            conceptrealations = models.Relation.objects.filter(Q(conceptfrom=self.id), pathway_filter, ~Q(relationtype="related"))
            if depth_limit is None or downlevel < depth_limit:
                if depth_limit is not None:
                    downlevel = downlevel + 1
                for relation in conceptrealations:
                    # reuse the cached shallow copy when we've already
                    # visited this concept (cycle guard)
                    subconcept = (
                        _cache[str(relation.conceptto_id)]
                        if str(relation.conceptto_id) in _cache
                        else self.__class__().get(
                            id=relation.conceptto_id,
                            include_subconcepts=include_subconcepts,
                            include_parentconcepts=include_parentconcepts,
                            include_relatedconcepts=include_relatedconcepts,
                            exclude=exclude,
                            include=include,
                            depth_limit=depth_limit,
                            up_depth_limit=up_depth_limit,
                            downlevel=downlevel,
                            uplevel=uplevel,
                            nodetype=nodetype,
                            semantic=semantic,
                            pathway_filter=pathway_filter,
                            _cache=_cache.copy(),
                            lang=lang,
                        )
                    )
                    subconcept.relationshiptype = relation.relationtype_id
                    self.subconcepts.append(subconcept)

                self.subconcepts = sorted(
                    self.subconcepts, key=lambda concept: self.natural_keys(concept.get_sortkey(lang)), reverse=False
                )
                # self.subconcepts = sorted(self.subconcepts, key=methodcaller(
                # 'get_sortkey', lang=lang), reverse=False)

        if include_parentconcepts:
            conceptrealations = models.Relation.objects.filter(Q(conceptto=self.id), pathway_filter, ~Q(relationtype="related"))
            if up_depth_limit is None or uplevel < up_depth_limit:
                if up_depth_limit is not None:
                    uplevel = uplevel + 1
                for relation in conceptrealations:
                    # note: upward recursion never re-expands subconcepts
                    parentconcept = (
                        _cache[str(relation.conceptfrom_id)]
                        if str(relation.conceptfrom_id) in _cache
                        else self.__class__().get(
                            id=relation.conceptfrom_id,
                            include_subconcepts=False,
                            include_parentconcepts=include_parentconcepts,
                            include_relatedconcepts=include_relatedconcepts,
                            exclude=exclude,
                            include=include,
                            depth_limit=depth_limit,
                            up_depth_limit=up_depth_limit,
                            downlevel=downlevel,
                            uplevel=uplevel,
                            nodetype=nodetype,
                            semantic=semantic,
                            pathway_filter=pathway_filter,
                            _cache=_cache.copy(),
                            lang=lang,
                        )
                    )
                    parentconcept.relationshiptype = relation.relationtype_id
                    self.parentconcepts.append(parentconcept)

        if include_relatedconcepts:
            conceptrealations = models.Relation.objects.filter(
                Q(relationtype="related") | Q(relationtype__category="Mapping Properties"),
                Q(conceptto=self.id) | Q(conceptfrom=self.id),
            )
            relations = []
            for relation in conceptrealations:
                # edges are undirected here: pick whichever endpoint is
                # not self, and dedupe on the relation id
                if str(relation.conceptto_id) != self.id and str(relation.relationid) not in relations:
                    relations.append(str(relation.relationid))
                    relatedconcept = self.__class__().get(relation.conceptto_id, include=["label"], lang=lang)
                    relatedconcept.relationshiptype = relation.relationtype_id
                    self.relatedconcepts.append(relatedconcept)
                if str(relation.conceptfrom_id) != self.id and str(relation.relationid) not in relations:
                    relations.append(str(relation.relationid))
                    relatedconcept = self.__class__().get(relation.conceptfrom_id, include=["label"], lang=lang)
                    relatedconcept.relationshiptype = relation.relationtype_id
                    self.relatedconcepts.append(relatedconcept)

    return self
def save(self):
    """
    Persist this concept, its values, parent/sub/related concepts and
    the relations between them.  Generates a new uuid4 pk when the
    concept has no id yet.  Returns the saved models.Concept row.
    """
    self.id = self.id if (self.id != "" and self.id is not None) else str(uuid.uuid4())
    concept, created = models.Concept.objects.get_or_create(
        pk=self.id, defaults={"legacyoid": self.legacyoid if self.legacyoid != "" else self.id, "nodetype_id": self.nodetype}
    )

    for value in self.values:
        if not isinstance(value, ConceptValue):
            value = ConceptValue(value)
        value.conceptid = self.id
        value.save()

    for parentconcept in self.parentconcepts:
        parentconcept.save()
        parentconcept.add_relation(self, parentconcept.relationshiptype)

    for subconcept in self.subconcepts:
        subconcept.save()
        self.add_relation(subconcept, subconcept.relationshiptype)

    # if we're moving a Concept Scheme below another Concept or Concept Scheme
    # demote it to a plain Concept and downgrade its former hasTopConcept
    # relations to narrower
    if len(self.parentconcepts) > 0 and concept.nodetype_id == "ConceptScheme":
        concept.nodetype_id = "Concept"
        concept.save()
        self.load(concept)
        for relation in models.Relation.objects.filter(conceptfrom=concept, relationtype_id="hasTopConcept"):
            relation.relationtype_id = "narrower"
            relation.save()

    for relatedconcept in self.relatedconcepts:
        self.add_relation(relatedconcept, relatedconcept.relationshiptype)

        if relatedconcept.relationshiptype == "member":
            child_concepts = relatedconcept.get(include_subconcepts=True)

            def applyRelationship(concept):
                # propagate the 'member' relationship to every descendant
                for subconcept in concept.subconcepts:
                    concept.add_relation(subconcept, relatedconcept.relationshiptype)

            child_concepts.traverse(applyRelationship)

    return concept
def delete(self, delete_self=False):
    """
    Deletes any subconcepts associated with this concept and additionally this concept if 'delete_self' is True
    If any parentconcepts or relatedconcepts are included then it will only delete the relationship to those concepts but not the concepts themselves
    If any values are passed, then those values as well as the relationship to those values will be deleted

    Note, django will automatically take care of deleting any db models that have a foreign key relationship to the model being deleted
    (eg: deleting a concept model will also delete all values and relationships), but because we need to manage deleting
    parent concepts and related concepts and values we have to do that here too
    """
    for subconcept in self.subconcepts:
        concepts_to_delete = Concept.gather_concepts_to_delete(subconcept)
        for key, concept in concepts_to_delete.items():
            models.Concept.objects.get(pk=key).delete()

    for parentconcept in self.parentconcepts:
        # sever only the upward link; the parent concept itself survives
        relations_filter = (
            (Q(relationtype__category="Semantic Relations") | Q(relationtype="hasTopConcept"))
            & Q(conceptfrom=parentconcept.id)
            & Q(conceptto=self.id)
        )
        conceptrelations = models.Relation.objects.filter(relations_filter)
        for relation in conceptrelations:
            relation.delete()

        if models.Relation.objects.filter(relations_filter).count() == 0:
            # we've removed all parent concepts so now this concept needs to be promoted to a Concept Scheme
            concept = models.Concept.objects.get(pk=self.id)
            concept.nodetype_id = "ConceptScheme"
            concept.save()
            self.load(concept)

            for relation in models.Relation.objects.filter(conceptfrom=concept, relationtype_id="narrower"):
                relation.relationtype_id = "hasTopConcept"
                relation.save()

    deletedrelatedconcepts = []
    for relatedconcept in self.relatedconcepts:
        # drop related/member/mapping relations in both directions
        conceptrelations = models.Relation.objects.filter(
            Q(relationtype="related") | Q(relationtype="member") | Q(relationtype__category="Mapping Properties"),
            conceptto=relatedconcept.id,
            conceptfrom=self.id,
        )
        for relation in conceptrelations:
            relation.delete()
            deletedrelatedconcepts.append(relatedconcept)

        conceptrelations = models.Relation.objects.filter(
            Q(relationtype="related") | Q(relationtype="member") | Q(relationtype__category="Mapping Properties"),
            conceptfrom=relatedconcept.id,
            conceptto=self.id,
        )
        for relation in conceptrelations:
            relation.delete()
            deletedrelatedconcepts.append(relatedconcept)

    for deletedrelatedconcept in deletedrelatedconcepts:
        if deletedrelatedconcept in self.relatedconcepts:
            self.relatedconcepts.remove(deletedrelatedconcept)

    for value in self.values:
        if not isinstance(value, ConceptValue):
            value = ConceptValue(value)
        value.delete()

    if delete_self:
        concepts_to_delete = Concept.gather_concepts_to_delete(self)
        for key, concept in concepts_to_delete.items():
            # delete only member relationships if the nodetype == Collection
            if concept.nodetype == "Collection":
                concept = Concept().get(
                    id=concept.id,
                    include_subconcepts=True,
                    include_parentconcepts=True,
                    include=["label"],
                    up_depth_limit=1,
                    semantic=False,
                )

                def find_concepts(concept):
                    # only sever 'member' links where this is the sole parent
                    if len(concept.parentconcepts) <= 1:
                        for subconcept in concept.subconcepts:
                            conceptrelation = models.Relation.objects.get(
                                conceptfrom=concept.id, conceptto=subconcept.id, relationtype="member"
                            )
                            conceptrelation.delete()
                            find_concepts(subconcept)

                find_concepts(concept)
                # if the concept is a collection, loop through the nodes and delete their rdmCollection values
                for node in models.Node.objects.filter(config__rdmCollection=concept.id):
                    node.config["rdmCollection"] = None
                    node.save()

            models.Concept.objects.get(pk=key).delete()
    return
def add_relation(self, concepttorelate, relationtype):
    """
    Create (or fetch, if it already exists) the Relation row linking this
    concept to 'concepttorelate' via the given relationtype; returns it.
    """
    relation, _created = models.Relation.objects.get_or_create(
        conceptfrom_id=self.id,
        conceptto_id=concepttorelate.id,
        relationtype_id=relationtype,
    )
    return relation
@staticmethod
def gather_concepts_to_delete(concept, lang=settings.LANGUAGE_CODE):
    """
    Gets a dictionary of all the concepts ids to delete
    The values of the dictionary keys differ somewhat depending on the node type being deleted
    If the nodetype == 'Concept' then return ConceptValue objects keyed to the concept id
    If the nodetype == 'ConceptScheme' then return a ConceptValue object with the value set to any ONE prefLabel keyed to the concept id
    We do this because it takes so long to gather the ids of the concepts when deleting a Scheme or Group
    """
    concepts_to_delete = {}

    # Here we have to worry about making sure we don't delete nodes that have more than 1 parent
    if concept.nodetype == "Concept":
        concept = Concept().get(
            id=concept.id, include_subconcepts=True, include_parentconcepts=True, include=["label"], up_depth_limit=1
        )

        def find_concepts(concept):
            # a concept with >1 parent stays; something else still points at it
            if len(concept.parentconcepts) <= 1:
                concepts_to_delete[concept.id] = concept
                for subconcept in concept.subconcepts:
                    find_concepts(subconcept)

        find_concepts(concept)
        return concepts_to_delete

    # here we can just delete everything and so use a recursive CTE to get the concept ids much more quickly
    if concept.nodetype == "ConceptScheme":
        concepts_to_delete[concept.id] = concept
        rows = Concept().get_child_concepts(concept.id)
        for row in rows:
            # row = (conceptid, value, valueid)
            if row[0] not in concepts_to_delete:
                concepts_to_delete[row[0]] = Concept({"id": row[0]})
            concepts_to_delete[row[0]].addvalue({"id": row[2], "conceptid": row[0], "value": row[1]})

    if concept.nodetype == "Collection":
        concepts_to_delete[concept.id] = concept
        rows = Concept().get_child_collections(concept.id)
        for row in rows:
            if row[0] not in concepts_to_delete:
                concepts_to_delete[row[0]] = Concept({"id": row[0]})
            concepts_to_delete[row[0]].addvalue({"id": row[2], "conceptid": row[0], "value": row[1]})

    return concepts_to_delete
def get_child_collections_hierarchically(self, conceptid, child_valuetypes=None, offset=0, limit=50, query=None):
    """
    Page through the 'member' children of a collection in hierarchical
    order; rows carry label, depth, total count and collector flag.
    """
    if not child_valuetypes:
        child_valuetypes = ["prefLabel"]
    columns = "valueidto::text, conceptidto::text, valueto, valuetypeto, depth, count(*) OVER() AS full_count, collector"
    return self.get_child_edges(
        conceptid,
        ["member"],
        child_valuetypes,
        offset=offset,
        limit=limit,
        order_hierarchically=True,
        query=query,
        columns=columns,
    )
def get_child_collections(self, conceptid, child_valuetypes=None, parent_valuetype="prefLabel", columns=None, depth_limit=""):
    """Return all 'member' descendants of a collection as raw rows."""
    if not child_valuetypes:
        child_valuetypes = ["prefLabel"]
    if not columns:
        columns = "conceptidto::text, valueto, valueidto::text"
    return self.get_child_edges(conceptid, ["member"], child_valuetypes, parent_valuetype, columns, depth_limit)
def get_child_concepts(self, conceptid, child_valuetypes=None, parent_valuetype="prefLabel", columns=None, depth_limit=""):
    """Return all narrower/hasTopConcept descendants as raw rows."""
    # child_valuetypes is deliberately passed through unchanged (possibly
    # None); get_child_edges applies its own default in that case.
    if not columns:
        columns = "conceptidto::text, valueto, valueidto::text"
    return self.get_child_edges(conceptid, ["narrower", "hasTopConcept"], child_valuetypes, parent_valuetype, columns, depth_limit)
def get_child_concepts_for_indexing(self, conceptid, child_valuetypes=None, parent_valuetype="prefLabel", depth_limit=""):
    """
    Return descendant concept values shaped as search-index documents
    (dicts with id/conceptid/type/category/value/language/top_concept).
    """
    columns = "valueidto::text, conceptidto::text, valuetypeto, categoryto, valueto, languageto"
    rows = self.get_child_edges(conceptid, ["narrower", "hasTopConcept"], child_valuetypes, parent_valuetype, columns, depth_limit)
    keys = ["id", "conceptid", "type", "category", "value", "language"]
    documents = []
    for row in rows:
        document = dict(zip(keys, row))
        # top_concept is filled in later by the indexing caller
        document["top_concept"] = ""
        documents.append(document)
    return documents
def get_child_edges(
    self,
    conceptid,
    relationtypes,
    child_valuetypes=None,
    parent_valuetype="prefLabel",
    columns=None,
    depth_limit=None,
    offset=None,
    limit=20,
    order_hierarchically=False,
    query=None,
    languageid=None,
):
    """
    Recursively builds a list of concept relations for a given concept and all it's subconcepts based on its relationship type and valuetypes.

    Uses a PostgreSQL recursive CTE over the relations/values tables.
    When order_hierarchically is True, rows come back in depth-first
    display order (sorted by sortorder then label) with depth, collector
    flag and full_count; 'query' then filters by prefLabel substring.
    Otherwise a flat distinct row set shaped by 'columns' is returned.
    Returns a list of raw cursor tuples; [] when conceptid is not a UUID.
    """
    # if the conceptid isn't a UUID then Postgres will throw an error and transactions will be aborted #7822
    try:
        uuid.UUID(conceptid)
    except:
        # NOTE(review): bare except also swallows TypeError etc.; any
        # failure is treated as "not a UUID"
        return []

    languageid = get_language() if languageid is None else languageid
    # NOTE(review): relationtypes/child_valuetypes are interpolated into
    # SQL below; callers pass literals today, but parameterization would
    # be safer -- confirm no user-supplied values reach these arguments.
    relationtypes = " or ".join(["r.relationtype = '%s'" % (relationtype) for relationtype in relationtypes])
    depth_limit = "and depth < %s" % depth_limit if depth_limit else ""
    child_valuetypes = ("','").join(
        child_valuetypes if child_valuetypes else models.DValueType.objects.filter(category="label").values_list("valuetype", flat=True)
    )
    limit_clause = " limit %s offset %s" % (limit, offset) if offset is not None else ""

    if order_hierarchically:
        sql = """
            WITH RECURSIVE

             ordered_relationships AS (
                (
                    SELECT r.conceptidfrom, r.conceptidto, r.relationtype, (
                        SELECT value
                        FROM values
                        WHERE conceptid=r.conceptidto
                        AND valuetype in ('prefLabel')
                        ORDER BY (
                            CASE WHEN languageid = '{languageid}' THEN 10
                            WHEN languageid like '{short_languageid}%' THEN 5
                            WHEN languageid like '{default_languageid}%' THEN 2
                            ELSE 0
                            END
                        ) desc limit 1
                    ) as valuesto,
                    (
                        SELECT value::int
                        FROM values
                        WHERE conceptid=r.conceptidto
                        AND valuetype in ('sortorder')
                        limit 1
                    ) as sortorder,
                    (
                        SELECT value
                        FROM values
                        WHERE conceptid=r.conceptidto
                        AND valuetype in ('collector')
                        limit 1
                    ) as collector
                    FROM relations r
                    WHERE r.conceptidfrom = '{conceptid}'
                    and ({relationtypes})
                    ORDER BY sortorder, valuesto
                )
                UNION
                (
                    SELECT r.conceptidfrom, r.conceptidto, r.relationtype,(
                        SELECT value
                        FROM values
                        WHERE conceptid=r.conceptidto
                        AND valuetype in ('prefLabel')
                        ORDER BY (
                            CASE WHEN languageid = '{languageid}' THEN 10
                            WHEN languageid like '{short_languageid}%' THEN 5
                            WHEN languageid like '{default_languageid}%' THEN 2
                            ELSE 0
                            END
                        ) desc limit 1
                    ) as valuesto,
                    (
                        SELECT value::int
                        FROM values
                        WHERE conceptid=r.conceptidto
                        AND valuetype in ('sortorder')
                        limit 1
                    ) as sortorder,
                    (
                        SELECT value
                        FROM values
                        WHERE conceptid=r.conceptidto
                        AND valuetype in ('collector')
                        limit 1
                    ) as collector
                    FROM relations r
                    JOIN ordered_relationships b ON(b.conceptidto = r.conceptidfrom)
                    WHERE ({relationtypes})
                    ORDER BY sortorder, valuesto
                )
            ),

            children AS (
                SELECT r.conceptidfrom, r.conceptidto,
                    to_char(row_number() OVER (), 'fm000000') as row,
                    r.collector,
                    1 AS depth       ---|NonRecursive Part
                    FROM ordered_relationships r
                    WHERE r.conceptidfrom = '{conceptid}'
                    and ({relationtypes})
                UNION
                    SELECT r.conceptidfrom, r.conceptidto,
                    row || '-' || to_char(row_number() OVER (), 'fm000000'),
                    r.collector,
                    depth+1      ---|RecursivePart
                    FROM ordered_relationships r
                    JOIN children b ON(b.conceptidto = r.conceptidfrom)
                    WHERE ({relationtypes})
                    {depth_limit}
            )

            {subquery}

            SELECT
            (
                select row_to_json(d)
                FROM (
                    SELECT *
                    FROM values
                    WHERE conceptid={recursive_table}.conceptidto
                    AND valuetype in ('prefLabel')
                    ORDER BY (
                        CASE WHEN languageid = '{languageid}' THEN 10
                        WHEN languageid like '{short_languageid}%' THEN 5
                        WHEN languageid like '{default_languageid}%' THEN 2
                        ELSE 0
                        END
                    ) desc limit 1
                ) d
            ) as valueto,
            depth, collector, count(*) OVER() AS full_count

           FROM {recursive_table} order by row {limit_clause};
        """

        # NOTE(review): 'query' is interpolated directly into the SQL via
        # %-formatting (only lower-cased) -- a potential SQL-injection
        # vector if callers pass unsanitized user input; confirm callers
        # escape or restrict this value.
        subquery = (
            """, results as (
                SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
                FROM children c
                JOIN values ON(values.conceptid = c.conceptidto)
                WHERE LOWER(values.value) like '%%%s%%'
                AND values.valuetype in ('prefLabel')
                UNION
                SELECT c.conceptidfrom, c.conceptidto, c.row, c.depth, c.collector
                FROM children c
                JOIN results r on (r.conceptidfrom=c.conceptidto)
            )"""
            % query.lower()
            if query is not None
            else ""
        )

        recursive_table = "results" if query else "children"

        sql = sql.format(
            conceptid=conceptid,
            relationtypes=relationtypes,
            child_valuetypes=child_valuetypes,
            parent_valuetype=parent_valuetype,
            depth_limit=depth_limit,
            limit_clause=limit_clause,
            subquery=subquery,
            recursive_table=recursive_table,
            languageid=languageid,
            short_languageid=languageid.split("-")[0],
            default_languageid=settings.LANGUAGE_CODE,
        )

    else:
        sql = """
            WITH RECURSIVE
                children AS (
                    SELECT r.conceptidfrom, r.conceptidto, r.relationtype, 1 AS depth
                        FROM relations r
                        WHERE r.conceptidfrom = '{conceptid}'
                        AND ({relationtypes})
                    UNION
                        SELECT r.conceptidfrom, r.conceptidto, r.relationtype, depth+1
                        FROM relations r
                        JOIN children c ON(c.conceptidto = r.conceptidfrom)
                        WHERE ({relationtypes})
                        {depth_limit}
                ),
                results AS (
                    SELECT
                        valuefrom.value as valuefrom, valueto.value as valueto,
                        valuefrom.valueid as valueidfrom, valueto.valueid as valueidto,
                        valuefrom.valuetype as valuetypefrom, valueto.valuetype as valuetypeto,
                        valuefrom.languageid as languagefrom, valueto.languageid as languageto,
                        dtypesfrom.category as categoryfrom, dtypesto.category as categoryto,
                        c.conceptidfrom, c.conceptidto
                    FROM values valueto
                        JOIN d_value_types dtypesto ON(dtypesto.valuetype = valueto.valuetype)
                        JOIN children c ON(c.conceptidto = valueto.conceptid)
                        JOIN values valuefrom ON(c.conceptidfrom = valuefrom.conceptid)
                        JOIN d_value_types dtypesfrom ON(dtypesfrom.valuetype = valuefrom.valuetype)
                    WHERE valueto.valuetype in ('{child_valuetypes}')
                    AND valuefrom.valuetype in ('{child_valuetypes}')
                )
                SELECT distinct {columns}
                FROM results {limit_clause}
        """

        if not columns:
            columns = """
                conceptidfrom::text, conceptidto::text,
                valuefrom, valueto,
                valueidfrom::text, valueidto::text,
                valuetypefrom, valuetypeto,
                languagefrom, languageto,
                categoryfrom, categoryto
            """

        sql = sql.format(
            conceptid=conceptid,
            relationtypes=relationtypes,
            child_valuetypes=child_valuetypes,
            columns=columns,
            depth_limit=depth_limit,
            limit_clause=limit_clause,
        )

    cursor = connection.cursor()
    cursor.execute(sql)
    rows = cursor.fetchall()
    return rows
def traverse(self, func, direction="down", scope=None, **kwargs):
    """
    Walk the concept graph from self toward the leaves (direction='down')
    or the roots (direction='up'), invoking func on each unvisited node;
    scope, when given, is passed as func's second argument.  The first
    non-None value returned by func short-circuits the walk and is
    returned; already-visited ids (tracked in kwargs['_cache']) are
    skipped to guard against cycles.
    """
    _cache = kwargs.pop("_cache", [])
    if self.id in _cache:
        return None
    _cache.append(self.id)

    ret = func(self, **kwargs) if scope is None else func(self, scope, **kwargs)
    # a non-None result from the callback ends the traversal early
    if ret is not None:
        return ret

    neighbors = self.subconcepts if direction == "down" else self.parentconcepts
    for neighbor in neighbors:
        ret = neighbor.traverse(func, direction, scope, _cache=_cache, **kwargs)
        if ret is not None:
            return ret
def get_sortkey(self, lang=settings.LANGUAGE_CODE):
    """
    Return the key used to order this concept among its siblings: the
    numeric 'sortorder' value when one exists (None if it can't be
    parsed), otherwise the preferred label in the requested language.

    Fix: the original bare ``except`` swallowed *every* exception
    (including KeyboardInterrupt/SystemExit); only the exceptions
    float() can actually raise are caught now.
    """
    for value in self.values:
        if value.type == "sortorder":
            try:
                return float(value.value)
            except (TypeError, ValueError, OverflowError):
                # unparseable sortorder sorts as None (prior behavior)
                return None
    return self.get_preflabel(lang=lang).value
def natural_keys(self, text):
    """
    Key function for human ("natural") sort ordering.
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    float regex comes from https://stackoverflow.com/a/12643073/190597
    """

    def _to_number(token):
        # numeric tokens compare numerically; everything else as text
        try:
            return float(token)
        except ValueError:
            return token

    tokens = re.split(r"[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)", str(text))
    return [_to_number(token) for token in tokens]
def get_preflabel(self, lang=settings.LANGUAGE_CODE):
    """
    Return the best label ConceptValue for this concept in the requested
    language.  Labels are weighted multiplicatively: prefLabel (x10)
    beats altLabel (x4); an exact language match (x10) beats a matching
    primary subtag (x5).  Falls back to an empty ConceptValue when the
    concept has no labels at all.
    """
    score = 0  # NOTE(review): unused; left for byte-compatibility
    ranked_labels = []
    if self.values == []:
        # nothing loaded yet -- fetch just the labels from the database
        concept = Concept().get(id=self.id, include_subconcepts=False, include_parentconcepts=False, include=["label"])
    else:
        concept = self

    for value in concept.values:
        ranked_label = {"weight": 1, "value": value}
        if value.type == "prefLabel":
            ranked_label["weight"] = ranked_label["weight"] * 10
        elif value.type == "altLabel":
            ranked_label["weight"] = ranked_label["weight"] * 4

        if value.language == lang:
            ranked_label["weight"] = ranked_label["weight"] * 10
        elif value.language.split("-")[0] == lang.split("-")[0]:
            # primary language subtag matches (e.g. 'en' vs 'en-US')
            ranked_label["weight"] = ranked_label["weight"] * 5

        ranked_labels.append(ranked_label)

    ranked_labels = sorted(ranked_labels, key=lambda label: label["weight"], reverse=True)
    if len(ranked_labels) == 0:
        ranked_labels.append({"weight": 1, "value": ConceptValue()})

    return ranked_labels[0]["value"]
def flatten(self, ret=None):
    """Collapse this concept's subtree into a flat, unordered list of concepts."""
    collected = [] if ret is None else ret
    collected.append(self)
    for child in self.subconcepts:
        child.flatten(collected)
    return collected
def addparent(self, value):
    """
    Append a parent concept, accepting either a serialized dict or a
    Concept instance.

    Raises:
        Exception: when value is neither a dict nor a Concept.

    Fix: the error message is now wrapped in _() for translation,
    matching addsubconcept() and addrelatedconcept().
    """
    if isinstance(value, dict):
        self.parentconcepts.append(Concept(value))
    elif isinstance(value, Concept):
        self.parentconcepts.append(value)
    else:
        raise Exception(_("Invalid parent concept definition: %s") % (value))
def addsubconcept(self, value):
    """
    Append a child concept, accepting either a serialized dict or a
    Concept instance; anything else raises.
    """
    if isinstance(value, Concept):
        self.subconcepts.append(value)
    elif isinstance(value, dict):
        self.subconcepts.append(Concept(value))
    else:
        raise Exception(_("Invalid subconcept definition: %s") % (value))
def addrelatedconcept(self, value):
    """
    Append a related concept, accepting either a serialized dict or a
    Concept instance; anything else raises.
    """
    if isinstance(value, Concept):
        self.relatedconcepts.append(value)
    elif isinstance(value, dict):
        self.relatedconcepts.append(Concept(value))
    else:
        raise Exception(_("Invalid related concept definition: %s") % (value))
def addvalue(self, value):
    """
    Attach a value to this concept.  Accepts a dict (its conceptid is
    stamped with this concept's id), a ConceptValue, or a models.Value
    row; anything else raises.
    """
    if isinstance(value, ConceptValue):
        self.values.append(value)
    elif isinstance(value, dict):
        value["conceptid"] = self.id
        self.values.append(ConceptValue(value))
    elif isinstance(value, models.Value):
        self.values.append(ConceptValue(value))
    else:
        raise Exception(_("Invalid value definition: %s") % (value))
def index(self, scheme=None):
    """
    Index this concept's values into the search engine under the given
    scheme (defaulting to this concept's own context), then recurse
    into subconcepts.
    """
    scheme = scheme if scheme is not None else self.get_context()
    for value in self.values:
        value.index(scheme=scheme)

    # children of a scheme resolve their own context while indexing
    if self.nodetype == "ConceptScheme":
        scheme = None

    for subconcept in self.subconcepts:
        subconcept.index(scheme=scheme)
def bulk_index(self):
    """
    Build search-engine documents for this concept's descendants and
    submit them in one bulk request.  For a ConceptScheme every top
    concept (and its children) is indexed; for a Concept the concept's
    own subtree is indexed under its scheme's id.
    """
    concept_docs = []

    if self.nodetype == "ConceptScheme":
        # NOTE(review): `values=["label"]` is not a parameter of get();
        # it is swallowed by **kwargs -- presumably this was meant to be
        # include=["label"].  Verify before changing.
        concept = Concept().get(id=self.id, values=["label"])
        concept.index()
        # depth_limit=1 yields just the scheme's top concepts
        for topConcept in self.get_child_concepts_for_indexing(self.id, depth_limit=1):
            concept = Concept().get(id=topConcept["conceptid"])
            scheme = concept.get_context()
            topConcept["top_concept"] = scheme.id
            concept_docs.append(se.create_bulk_item(index=CONCEPTS_INDEX, id=topConcept["id"], data=topConcept))
            for childConcept in concept.get_child_concepts_for_indexing(topConcept["conceptid"]):
                childConcept["top_concept"] = scheme.id
                concept_docs.append(se.create_bulk_item(index=CONCEPTS_INDEX, id=childConcept["id"], data=childConcept))

    if self.nodetype == "Concept":
        concept = Concept().get(id=self.id, values=["label"])
        scheme = concept.get_context()
        concept.index(scheme)
        for childConcept in concept.get_child_concepts_for_indexing(self.id):
            childConcept["top_concept"] = scheme.id
            concept_docs.append(se.create_bulk_item(index=CONCEPTS_INDEX, id=childConcept["id"], data=childConcept))

    se.bulk_index(concept_docs)
def delete_index(self, delete_self=False):
    """
    Remove search-index documents for this concept's deletable subtree.
    When delete_self is True the concept itself (and its descendants) is
    de-indexed; otherwise only each subconcept's deletable subtree is.
    """

    def _drop_value_docs(concepts):
        # one delete-by-query per concept id
        for concept in concepts.values():
            query = Query(se, start=0, limit=10000)
            query.add_query(Term(field="conceptid", term=concept.id))
            query.delete(index=CONCEPTS_INDEX)

    if delete_self:
        _drop_value_docs(Concept.gather_concepts_to_delete(self))
    else:
        for subconcept in self.subconcepts:
            _drop_value_docs(Concept.gather_concepts_to_delete(subconcept))
def concept_tree(
    self, top_concept="00000000-0000-0000-0000-000000000001", lang=settings.LANGUAGE_CODE, mode="semantic",
):
    """
    Build a lightweight tree of labeled nodes for UI display.  When this
    concept has no id (or is the given top_concept), one tree per
    ConceptScheme ('semantic' mode) or Collection ('collections' mode)
    is returned; otherwise the children of this concept are returned.
    Nodes deeper than the depth limit get load_on_demand=True instead of
    children.
    """

    class concept(object):
        # plain display node: label/labelid/id/sortorder/load_on_demand/children
        def __init__(self, *args, **kwargs):
            self.label = ""
            self.labelid = ""
            self.id = ""
            self.sortorder = None
            self.load_on_demand = False
            self.children = []

    def _findNarrowerConcept(conceptid, depth_limit=None, level=0):
        labels = models.Value.objects.filter(concept=conceptid)
        ret = concept()
        temp = Concept()
        for label in labels:
            temp.addvalue(label)
            if label.valuetype_id == "sortorder":
                try:
                    ret.sortorder = float(label.value)
                except:
                    # NOTE(review): bare except -- any parse failure
                    # leaves sortorder as None
                    ret.sortorder = None
        label = temp.get_preflabel(lang=lang)
        ret.label = label.value
        ret.id = label.conceptid
        ret.labelid = label.id

        if mode == "semantic":
            conceptrealations = models.Relation.objects.filter(
                Q(conceptfrom=conceptid), Q(relationtype__category="Semantic Relations") | Q(relationtype__category="Properties")
            )
        if mode == "collections":
            conceptrealations = models.Relation.objects.filter(
                Q(conceptfrom=conceptid), Q(relationtype="member") | Q(relationtype="hasCollection")
            )
        if depth_limit is not None and len(conceptrealations) > 0 and level >= depth_limit:
            # defer loading this node's children to a later request
            ret.load_on_demand = True
        else:
            if depth_limit is not None:
                level = level + 1
            for relation in conceptrealations:
                ret.children.append(_findNarrowerConcept(relation.conceptto_id, depth_limit=depth_limit, level=level))
            ret.children = sorted(
                ret.children,
                key=lambda concept: self.natural_keys(concept.sortorder if concept.sortorder else concept.label),
                reverse=False,
            )
        return ret

    def _findBroaderConcept(conceptid, child_concept, depth_limit=None, level=0):
        # walk upward, wrapping child_concept in successive parents
        conceptrealations = models.Relation.objects.filter(
            Q(conceptto=conceptid), ~Q(relationtype="related"), ~Q(relationtype__category="Mapping Properties")
        )
        if len(conceptrealations) > 0 and conceptid != top_concept:
            labels = models.Value.objects.filter(concept=conceptrealations[0].conceptfrom_id)
            ret = concept()
            temp = Concept()
            for label in labels:
                temp.addvalue(label)
            label = temp.get_preflabel(lang=lang)
            ret.label = label.value
            ret.id = label.conceptid
            ret.labelid = label.id

            ret.children.append(child_concept)
            return _findBroaderConcept(conceptrealations[0].conceptfrom_id, ret, depth_limit=depth_limit, level=level)
        else:
            return child_concept

    graph = []
    if self.id is None or self.id == "" or self.id == "None" or self.id == top_concept:
        if mode == "semantic":
            concepts = models.Concept.objects.filter(nodetype="ConceptScheme")
            for conceptmodel in concepts:
                graph.append(_findNarrowerConcept(conceptmodel.pk, depth_limit=1))
        if mode == "collections":
            concepts = models.Concept.objects.filter(nodetype="Collection")
            for conceptmodel in concepts:
                graph.append(_findNarrowerConcept(conceptmodel.pk, depth_limit=0))
        graph = sorted(graph, key=lambda concept: concept.label)
        # graph = _findNarrowerConcept(concepts[0].pk, depth_limit=1).children
    else:
        graph = _findNarrowerConcept(self.id, depth_limit=1).children
        # concepts = _findNarrowerConcept(self.id, depth_limit=1)
        # graph = [_findBroaderConcept(self.id, concepts, depth_limit=1)]

    return graph
def get_paths(self, lang=settings.LANGUAGE_CODE):
    """
    Return every path from a root concept down to this concept as a list
    of paths, each path a list of {label, relationshiptype, id} dicts
    ordered root-first.  Requires parentconcepts to be populated (e.g.
    via get(include_parentconcepts=True)); cycles are broken with an id
    cache.

    Fix: the nested helper used mutable default arguments
    (path=[], path_list=[], _cache=[]); they are now None-sentinels,
    created fresh inside the call.  Dead commented-out code removed.
    """

    def graph_to_paths(current_concept, path=None, path_list=None, _cache=None):
        path = [] if path is None else path
        path_list = [] if path_list is None else path_list
        _cache = [] if _cache is None else _cache

        # prepend this concept to a copy of the path accumulated so far
        current_path = path[:]
        current_path.insert(
            0,
            {
                "label": current_concept.get_preflabel(lang=lang).value,
                "relationshiptype": current_concept.relationshiptype,
                "id": current_concept.id,
            },
        )

        if len(current_concept.parentconcepts) == 0 or current_concept.id in _cache:
            # reached a root (or a cycle): record the completed path
            path_list.append(current_path[:])
        else:
            _cache.append(current_concept.id)
            for parent in current_concept.parentconcepts:
                graph_to_paths(parent, current_path, path_list, _cache)

        return path_list

    return graph_to_paths(self)
def get_node_and_links(self, lang=settings.LANGUAGE_CODE):
    """
    Build the {nodes, links} structure consumed by the d3 relation
    graph: this concept, its ancestors (via parentconcepts), direct
    subconcepts and related concepts, with links relabeled to the
    nodes' integer indices.

    Fixes: the nested helper's mutable default argument (_cache=[]) is
    now a None-sentinel; the O(nodes x links) id-remapping double loop
    is replaced by a single dict lookup (identical output, since
    concept_ids are unique strings); dead commented-out code removed.
    """
    nodes = [{"concept_id": self.id, "name": self.get_preflabel(lang=lang).value, "type": "Current"}]
    links = []

    def get_parent_nodes_and_links(current_concept, _cache=None):
        _cache = [] if _cache is None else _cache
        if current_concept.id not in _cache:
            _cache.append(current_concept.id)
            for parent in current_concept.parentconcepts:
                nodes.append(
                    {
                        "concept_id": parent.id,
                        "name": parent.get_preflabel(lang=lang).value,
                        "type": "Root" if len(parent.parentconcepts) == 0 else "Ancestor",
                    }
                )
                links.append({"target": current_concept.id, "source": parent.id, "relationship": "broader"})
                get_parent_nodes_and_links(parent, _cache)

    get_parent_nodes_and_links(self)

    for child in self.subconcepts:
        nodes.append({"concept_id": child.id, "name": child.get_preflabel(lang=lang).value, "type": "Descendant"})
        links.append({"source": self.id, "target": child.id, "relationship": "narrower"})

    for related in self.relatedconcepts:
        nodes.append({"concept_id": related.id, "name": related.get_preflabel(lang=lang).value, "type": "Related"})
        links.append({"source": self.id, "target": related.id, "relationship": "related"})

    # get unique node list and assign unique integer ids for each node (required by d3)
    nodes = list({node["concept_id"]: node for node in nodes}.values())
    index_by_concept_id = {}
    for i, node in enumerate(nodes):
        node["id"] = i
        index_by_concept_id[node["concept_id"]] = i
    for link in links:
        link["source"] = index_by_concept_id.get(link["source"], link["source"])
        link["target"] = index_by_concept_id.get(link["target"], link["target"])

    return {"nodes": nodes, "links": links}
def get_context(self):
    """
    Return the Top Concept that this concept participates in (the
    concept directly below its scheme), or self when the concept is
    itself a scheme/entity type or has no parents.
    """
    if self.nodetype == "Concept" or self.nodetype == "Collection":
        concept = Concept().get(id=self.id, include_parentconcepts=True, include=None)

        def get_scheme_id(concept):
            # traverse() callback: returning a non-None value stops the
            # upward walk; the first concept whose parent link is
            # hasTopConcept is the Top Concept we want
            for parentconcept in concept.parentconcepts:
                if parentconcept.relationshiptype == "hasTopConcept":
                    return concept

        if len(concept.parentconcepts) > 0:
            return concept.traverse(get_scheme_id, direction="up")
        else:
            return self

    else:  # like ConceptScheme or EntityType
        return self
def get_scheme(self):
    """
    Return the ConceptScheme this Concept participates in, or None when the
    top concept has no single ConceptScheme parent.
    """
    top_concept = self.get_context()
    parents = top_concept.parentconcepts
    if len(parents) != 1:
        return None
    candidate = parents[0]
    return candidate if candidate.nodetype == "ConceptScheme" else None
def check_if_concept_in_use(self):
    """
    Return True if this concept, or any of its subconcepts, is referenced by
    a resource-instance tile.

    Checks each of this concept's value ids against tile data, then recurses
    into subconcepts until a use is found.
    """
    cursor = connection.cursor()
    for value in self.values:
        # Bind the value id as a query parameter instead of %-interpolating
        # it into the SQL text (avoids SQL injection and quoting bugs).
        sql = """
            SELECT count(*) from tiles t, jsonb_each_text(t.tiledata) as json_data
            WHERE json_data.value = %s
        """
        cursor.execute(sql, [str(value.id)])
        rows = cursor.fetchall()
        if rows[0][0] > 0:
            return True

    for subconcept in self.subconcepts:
        if subconcept.check_if_concept_in_use():
            return True

    return False
def get_e55_domain(self, conceptid):
    """
    For a given conceptid, build a nested dictionary representing that
    concept's graph (member pathway) formatted to support select2 dropdowns.

    Returns the serialized `children` list of the root node.
    """
    cursor = connection.cursor()
    # Recursive CTE walking 'member'/'hasTopConcept' relations downward,
    # collecting prefLabel/sortorder/collector values and the concept path.
    # SECURITY: conceptid is bound as a parameter, never formatted into the
    # SQL text.
    sql = """
        WITH RECURSIVE children AS (
            SELECT d.conceptidfrom, d.conceptidto, c2.value, c2.valueid as valueid, c.value as valueto, c.valueid as valueidto, c.valuetype as vtype, 1 AS depth, array[d.conceptidto] AS conceptpath, array[c.valueid] AS idpath ---|NonRecursive Part
            FROM relations d
            JOIN values c ON(c.conceptid = d.conceptidto)
            JOIN values c2 ON(c2.conceptid = d.conceptidfrom)
            WHERE d.conceptidfrom = %s
            and c2.valuetype = 'prefLabel'
            and c.valuetype in ('prefLabel', 'sortorder', 'collector')
            and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
            UNION
            SELECT d.conceptidfrom, d.conceptidto, v2.value, v2.valueid as valueid, v.value as valueto, v.valueid as valueidto, v.valuetype as vtype, depth+1, (conceptpath || d.conceptidto), (idpath || v.valueid) ---|RecursivePart
            FROM relations d
            JOIN children b ON(b.conceptidto = d.conceptidfrom)
            JOIN values v ON(v.conceptid = d.conceptidto)
            JOIN values v2 ON(v2.conceptid = d.conceptidfrom)
            WHERE v2.valuetype = 'prefLabel'
            and v.valuetype in ('prefLabel','sortorder', 'collector')
            and (d.relationtype = 'member' or d.relationtype = 'hasTopConcept')
        ) SELECT conceptidfrom::text, conceptidto::text, value, valueid::text, valueto, valueidto::text, depth, idpath::text, conceptpath::text, vtype FROM children ORDER BY depth, conceptpath;
    """

    column_names = [
        "conceptidfrom",
        "conceptidto",
        "value",
        "valueid",
        "valueto",
        "valueidto",
        "depth",
        "idpath",
        "conceptpath",
        "vtype",
    ]
    cursor.execute(sql, [conceptid])
    rows = cursor.fetchall()

    class Val(object):
        # lightweight tree node serialized for the select2 widget
        def __init__(self, conceptid):
            self.text = ""
            self.conceptid = conceptid
            self.id = ""
            self.sortorder = ""
            self.collector = ""
            self.children = []

    result = Val(conceptid)

    def _findNarrower(val, path, rec):
        # walk/extend the tree along `path`, filling in label/sortorder/collector
        # from the current row as the matching node is reached
        for conceptid in path:
            childids = [child.conceptid for child in val.children]
            if conceptid not in childids:
                new_val = Val(rec["conceptidto"])
                if rec["vtype"] == "sortorder":
                    new_val.sortorder = rec["valueto"]
                elif rec["vtype"] == "prefLabel":
                    new_val.text = rec["valueto"]
                    new_val.id = rec["valueidto"]
                elif rec["vtype"] == "collector":
                    new_val.collector = "collector"
                val.children.append(new_val)
            else:
                for child in val.children:
                    if conceptid == child.conceptid:
                        if conceptid == path[-1]:
                            if rec["vtype"] == "sortorder":
                                child.sortorder = rec["valueto"]
                            elif rec["vtype"] == "prefLabel":
                                child.text = rec["valueto"]
                                child.id = rec["valueidto"]
                            elif rec["vtype"] == "collector":
                                child.collector = "collector"
                path.pop(0)
                _findNarrower(child, path, rec)
        val.children.sort(key=lambda x: (x.sortorder, x.text))

    for row in rows:
        rec = dict(list(zip(column_names, row)))
        path = rec["conceptpath"][1:-1].split(",")
        _findNarrower(result, path, rec)

    return JSONSerializer().serializeToPython(result)["children"]
def make_collection(self):
    """
    Create and save a new Collection concept mirroring this concept's
    semantic tree: every semantic descendant (excluding 'related' relations)
    becomes a 'member' of the new collection.

    Raises when this concept has no values; returns the new collection.
    """
    if len(self.values) == 0:
        raise Exception(_("Need to include values when creating a collection"))
    values = JSONSerializer().serializeToPython(self.values)
    for value in values:
        # blank the ids so saving creates fresh value rows for the copy
        value["id"] = ""
    collection_concept = Concept({"nodetype": "Collection", "values": values})

    def create_collection(conceptfrom):
        # recursively walk Semantic Relations / Properties (excluding 'related'),
        # attaching each visited concept as a 'member'
        for relation in models.Relation.objects.filter(
            Q(conceptfrom_id=conceptfrom.id),
            Q(relationtype__category="Semantic Relations") | Q(relationtype__category="Properties"),
            ~Q(relationtype="related"),
        ):
            conceptto = Concept(relation.conceptto)
            if conceptfrom == self:
                collection_concept.add_relation(conceptto, "member")
            else:
                conceptfrom.add_relation(conceptto, "member")
            create_collection(conceptto)

    with transaction.atomic():
        collection_concept.save()
        create_collection(self)

    return collection_concept
class ConceptValue(object):
    """
    A single value (label, note, identifier, image, ...) attached to a
    concept, mirroring a row in the `values` table.
    """

    def __init__(self, *args, **kwargs):
        self.id = ""
        self.conceptid = ""
        self.type = ""      # valuetype id, e.g. "prefLabel"
        self.category = ""  # valuetype category, e.g. "label"
        self.value = ""
        self.language = ""

        if len(args) != 0:
            if isinstance(args[0], str):
                try:
                    # a bare UUID string means "load this value from the db"
                    uuid.UUID(args[0])
                    self.get(args[0])
                except (ValueError):
                    # otherwise the string is serialized JSON
                    self.load(JSONDeserializer().deserialize(args[0]))
            elif isinstance(args[0], object):
                self.load(args[0])

    def __repr__(self):
        return ('%s: %s = "%s" in lang %s') % (self.__class__, self.type, self.value, self.language)

    def get(self, id=""):
        """Populate this instance from the models.Value row with pk `id`."""
        self.load(models.Value.objects.get(pk=id))
        return self

    def save(self):
        """Persist this value, normalizing the language id to xx-XX form; no-op for blank values."""
        if self.value.strip() != "":
            self.id = self.id if (self.id != "" and self.id is not None) else str(uuid.uuid4())
            value = models.Value()
            value.pk = self.id
            value.value = self.value
            value.concept_id = self.conceptid  # models.Concept.objects.get(pk=self.conceptid)
            value.valuetype_id = self.type  # models.DValueType.objects.get(pk=self.type)

            if self.language != "":
                # need to normalize language ids to the form xx-XX
                lang_parts = self.language.lower().replace("_", "-").split("-")
                try:
                    lang_parts[1] = lang_parts[1].upper()
                except:
                    # no region subtag present; leave the bare language code
                    pass
                self.language = "-".join(lang_parts)
                value.language_id = self.language  # models.DLanguage.objects.get(pk=self.language)
            else:
                value.language_id = settings.LANGUAGE_CODE
            value.save()
            self.category = value.valuetype.category

    def delete(self):
        """Delete the backing row (via FileValue for images, so the file is removed too)."""
        if self.id != "":
            newvalue = models.Value.objects.get(pk=self.id)
            if newvalue.valuetype.valuetype == "image":
                newvalue = models.FileValue.objects.get(pk=self.id)
            newvalue.delete()
            self = ConceptValue()
            return self

    def load(self, value):
        """Populate this instance from a models.Value row or a plain dict."""
        if isinstance(value, models.Value):
            self.id = str(value.pk)
            self.conceptid = str(value.concept_id)
            self.type = value.valuetype_id
            self.category = value.valuetype.category
            self.value = value.value
            self.language = value.language_id

        if isinstance(value, dict):
            self.id = str(value["id"]) if "id" in value else ""
            self.conceptid = str(value["conceptid"]) if "conceptid" in value else ""
            self.type = value["type"] if "type" in value else ""
            self.category = value["category"] if "category" in value else ""
            self.value = value["value"] if "value" in value else ""
            self.language = value["language"] if "language" in value else ""

    def index(self, scheme=None):
        """Index label values into the concepts index, tagged with their top concept (scheme)."""
        if self.category == "label":
            data = JSONSerializer().serializeToPython(self)
            if scheme is None:
                scheme = self.get_scheme_id()
            if scheme is None:
                raise Exception(_("Index of label failed. Index type (scheme id) could not be derived from the label."))

            data["top_concept"] = scheme.id
            se.index_data(index=CONCEPTS_INDEX, body=data, idfield="id")

    def delete_index(self):
        """Remove every indexed document whose id matches this value's id."""
        query = Query(se, start=0, limit=10000)
        term = Term(field="id", term=self.id)
        query.add_query(term)
        query.delete(index=CONCEPTS_INDEX)

    def get_scheme_id(self):
        """Look up this value's indexed document and return its top concept, or None."""
        result = se.search(index=CONCEPTS_INDEX, id=self.id)
        if result["found"]:
            return Concept(result["top_concept"])
        else:
            return None
def get_preflabel_from_conceptid(conceptid, lang):
    """
    Return the best prefLabel document for a concept: an exact language match
    wins, then a same-primary-language match, then the system default
    language, then whatever label was seen last (or an empty stub).
    """
    fallback = {
        "category": "",
        "conceptid": "",
        "language": "",
        "value": "",
        "type": "",
        "id": "",
    }
    best = None

    search_query = Query(se)
    label_filter = Bool()
    label_filter.must(Match(field="type", query="prefLabel", type="phrase"))
    label_filter.filter(Terms(field="conceptid", terms=[conceptid]))
    search_query.add_query(label_filter)

    for hit in search_query.search(index=CONCEPTS_INDEX)["hits"]["hits"]:
        label = hit["_source"]
        fallback = label
        if label["language"] is not None and lang is not None:
            # exact language match short-circuits everything else
            if label["language"] == lang:
                return label
            if label["language"].split("-")[0] == lang.split("-")[0]:
                best = label
        if label["language"] == settings.LANGUAGE_CODE and best is None:
            best = label

    return fallback if best is None else best
def get_valueids_from_concept_label(label, conceptid=None, lang=None):
    """
    Return the indexed value documents whose text exactly matches `label`,
    optionally restricted to one concept and/or one language.
    """

    def exact_val_match(val, conceptid=None):
        # exact term match, don't care about relevance ordering.
        # due to language formating issues, and with (hopefully) small result sets
        # easier to have filter logic in python than to craft it in dsl
        if conceptid is None:
            return {"query": {"bool": {"filter": {"match_phrase": {"value": val}}}}}
        return {
            "query": {
                "bool": {"filter": [{"match_phrase": {"value": val}}, {"term": {"conceptid": conceptid}}, ]}
            }
        }

    concept_label_results = se.search(index=CONCEPTS_INDEX, body=exact_val_match(label, conceptid))
    if concept_label_results is None:
        print("Found no matches for label:'{0}' and concept_id: '{1}'".format(label, conceptid))
        return

    matches = []
    for hit in concept_label_results["hits"]["hits"]:
        source = hit["_source"]
        if lang is None or source["language"].lower() == lang.lower():
            matches.append(source)
    return matches
def get_preflabel_from_valueid(valueid, lang):
    """Resolve a value id to its concept, then return that concept's prefLabel."""
    doc = se.search(index=CONCEPTS_INDEX, id=valueid)
    if not doc["found"]:
        return None
    return get_preflabel_from_conceptid(doc["_source"]["conceptid"], lang)
|
GHSA-gmpq-xrxj-xh8m
|
arches/app/views/concept.py
|
@@ -380,8 +380,7 @@ def dropdown(request):
def paged_dropdown(request):
conceptid = request.GET.get("conceptid")
- query = request.GET.get("query", None)
- query = None if query == "" else query
+ query = request.GET.get("query", "")
page = int(request.GET.get("page", 1))
limit = 50
offset = (page - 1) * limit
@@ -405,25 +404,25 @@ def paged_dropdown(request):
found = True
break
if not found:
- sql = """
- SELECT value, valueid
- FROM
- (
- SELECT *, CASE WHEN LOWER(languageid) = '{languageid}' THEN 10
- WHEN LOWER(languageid) like '{short_languageid}%' THEN 5
- ELSE 0
- END score
- FROM values
- ) as vals
- WHERE LOWER(value)='{query}' AND score > 0
- AND valuetype in ('prefLabel')
- ORDER BY score desc limit 1
- """
-
languageid = get_language().lower()
- sql = sql.format(query=query.lower(), languageid=languageid, short_languageid=languageid.split("-")[0])
cursor = connection.cursor()
- cursor.execute(sql)
+ cursor.execute(
+ """
+ SELECT value, valueid
+ FROM
+ (
+ SELECT *, CASE WHEN LOWER(languageid) = %(languageid)s THEN 10
+ WHEN LOWER(languageid) like %(short_languageid)s THEN 5
+ ELSE 0
+ END score
+ FROM values
+ ) as vals
+ WHERE LOWER(value)=%(query)s AND score > 0
+ AND valuetype in ('prefLabel')
+ ORDER BY score desc limit 1
+ """,
+ {"languageid": languageid, "short_languageid": languageid.split("-")[0] + "%", "query": query.lower()},
+ )
rows = cursor.fetchall()
if len(rows) == 1:
|
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import uuid
from django.db import transaction, connection
from django.db.models import Q
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseNotAllowed, HttpResponseServerError
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
from django.utils.translation import get_language
from arches.app.models import models
from arches.app.models.system_settings import settings
from arches.app.models.concept import Concept, ConceptValue, CORE_CONCEPTS, get_preflabel_from_valueid
from arches.app.search.search_engine_factory import SearchEngineInstance as se
from arches.app.search.elasticsearch_dsl_builder import Bool, Match, Query, Nested, Terms, GeoShape, Range, SimpleQueryString
from arches.app.search.mappings import CONCEPTS_INDEX
from arches.app.utils.decorators import group_required
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from arches.app.utils.response import JSONResponse, JSONErrorResponse
from arches.app.utils.skos import SKOSWriter, SKOSReader
from arches.app.views.base import BaseManagerView
@method_decorator(group_required("RDM Administrator"), name="dispatch")
class RDMView(BaseManagerView):
    """Main Reference Data Manager page: lists concept schemes and collections."""

    def get(self, request, conceptid):
        lang = request.GET.get("lang", request.LANGUAGE_CODE)
        languages = sort_languages(models.Language.objects.all(), lang)

        # preferred label of every ConceptScheme, for the scheme picker
        concept_schemes = []
        for concept in models.Concept.objects.filter(nodetype="ConceptScheme"):
            concept_schemes.append(Concept().get(id=concept.pk, include=["label"]).get_preflabel(lang=lang))

        # preferred label of every Collection, for the collections picker
        collections = []
        for concept in models.Concept.objects.filter(nodetype="Collection"):
            collections.append(Concept().get(id=concept.pk, include=["label"]).get_preflabel(lang=lang))

        context = self.get_context_data(
            main_script="rdm",
            active_page="RDM",
            languages=languages,
            conceptid=conceptid,
            concept_schemes=concept_schemes,
            collections=collections,
            CORE_CONCEPTS=CORE_CONCEPTS,
        )

        context["nav"]["icon"] = "fa fa-align-left"
        context["nav"]["title"] = _("Reference Data Manager")
        context["nav"]["help"] = {"title": _("Using the RDM"), "template": "rdm-help"}

        return render(request, "rdm.htm", context)
def get_sparql_providers(endpoint=None):
    """
    Instantiate the configured SPARQL endpoint providers, keyed by endpoint
    URL; with `endpoint` given, return that single provider instance.
    """
    providers = {}
    for provider in settings.SPARQL_ENDPOINT_PROVIDERS:
        class_path = provider["SPARQL_ENDPOINT_PROVIDER"][settings.LANGUAGE_CODE]["value"]
        instance = import_string(class_path)()
        providers[instance.endpoint] = instance

    if endpoint:
        return providers[endpoint]
    return providers
def sort_languages(languages, lang):
    """
    Sort languages from the d_languages model by name. If there is not exactly
    one default language, mark the language whose code equals `lang` as the
    default (and clear the flag everywhere else).
    """
    defaults = [language for language in languages if language.isdefault == True]
    if len(defaults) != 1:
        for language in languages:
            language.isdefault = language.code == lang
    return sorted(languages, key=lambda x: x.name)
@group_required("RDM Administrator")
def concept(request, conceptid):
    """
    RDM concept CRUD view.

    GET renders the concept report (or a summary report when no conceptid is
    supplied); POST accepts an image upload, a SKOS file, or a JSON concept
    to save; DELETE removes a concept after checking it is not in use.
    """
    f = request.GET.get("f", "json")
    mode = request.GET.get("mode", "")
    lang = request.GET.get("lang", request.LANGUAGE_CODE)
    pretty = request.GET.get("pretty", False)

    if request.method == "GET":
        include_subconcepts = request.GET.get("include_subconcepts", "true") == "true"
        include_parentconcepts = request.GET.get("include_parentconcepts", "true") == "true"
        include_relatedconcepts = request.GET.get("include_relatedconcepts", "true") == "true"
        emulate_elastic_search = request.GET.get("emulate_elastic_search", "false") == "true"
        depth_limit = request.GET.get("depth_limit", None)
        # NOTE(review): the request-supplied depth_limit is immediately
        # overridden; the report always renders a single level deep.
        depth_limit = 1

        if not conceptid:
            # no concept selected: render the default summary report
            return render(
                request,
                "views/rdm/concept-report.htm",
                {
                    "lang": lang,
                    "concept_count": models.Concept.objects.filter(nodetype="Concept").count(),
                    "collection_count": models.Concept.objects.filter(nodetype="Collection").count(),
                    "scheme_count": models.Concept.objects.filter(nodetype="ConceptScheme").count(),
                    "entitytype_count": models.Concept.objects.filter(nodetype="EntityType").count(),
                    "default_report": True,
                },
            )

        labels = []

        concept_graph = Concept().get(
            id=conceptid,
            include_subconcepts=include_subconcepts,
            include_parentconcepts=include_parentconcepts,
            include_relatedconcepts=include_relatedconcepts,
            depth_limit=depth_limit,
            up_depth_limit=None,
            lang=lang,
            semantic=(mode == "semantic" or mode == ""),
        )

        languages = sort_languages(models.Language.objects.all(), lang)

        valuetypes = models.DValueType.objects.all()
        relationtypes = models.DRelationType.objects.all()
        prefLabel = concept_graph.get_preflabel(lang=lang)
        for subconcept in concept_graph.subconcepts:
            subconcept.prefLabel = subconcept.get_preflabel(lang=lang)
        for relatedconcept in concept_graph.relatedconcepts:
            relatedconcept.prefLabel = relatedconcept.get_preflabel(lang=lang)
        for value in concept_graph.values:
            if value.category == "label":
                labels.append(value)
            if value.type == "image":
                # build an absolute media URL, collapsing duplicate slashes
                value.full_image_url = (
                    (settings.FORCE_SCRIPT_NAME if settings.FORCE_SCRIPT_NAME is not None else "") + settings.MEDIA_URL + value.value
                ).replace("//", "/")

        if (mode == "semantic" or mode == "") and (
            concept_graph.nodetype == "Concept" or concept_graph.nodetype == "ConceptScheme" or concept_graph.nodetype == "EntityType"
        ):
            if concept_graph.nodetype == "ConceptScheme":
                parent_relations = relationtypes.filter(category="Properties")
            else:
                parent_relations = (
                    relationtypes.filter(category="Semantic Relations")
                    .exclude(relationtype="related")
                    .exclude(relationtype="broader")
                    .exclude(relationtype="broaderTransitive")
                )
            return render(
                request,
                "views/rdm/concept-report.htm",
                {
                    "FORCE_SCRIPT_NAME": settings.FORCE_SCRIPT_NAME,
                    "lang": lang,
                    "prefLabel": prefLabel,
                    "labels": labels,
                    "concept": concept_graph,
                    "languages": languages,
                    "sparql_providers": get_sparql_providers(),
                    "valuetype_labels": valuetypes.filter(category="label"),
                    "valuetype_notes": valuetypes.filter(category="note"),
                    "valuetype_related_values": valuetypes.filter(category__in=["undefined", "identifiers"]),
                    "parent_relations": parent_relations,
                    "related_relations": relationtypes.filter(Q(category="Mapping Properties") | Q(relationtype="related")),
                    "concept_paths": concept_graph.get_paths(lang=lang),
                    "graph_json": JSONSerializer().serialize(concept_graph.get_node_and_links(lang=lang)),
                    "direct_parents": [parent.get_preflabel(lang=lang) for parent in concept_graph.parentconcepts],
                },
            )
        elif mode == "collections":
            return render(
                request,
                "views/rdm/entitytype-report.htm",
                {
                    "lang": lang,
                    "prefLabel": prefLabel,
                    "labels": labels,
                    "concept": concept_graph,
                    "languages": languages,
                    "valuetype_labels": valuetypes.filter(category="label"),
                    "valuetype_notes": valuetypes.filter(category="note"),
                    "valuetype_related_values": valuetypes.filter(category__in=["undefined", "identifiers"]),
                    "related_relations": relationtypes.filter(relationtype="member"),
                    "concept_paths": concept_graph.get_paths(lang=lang),
                },
            )

    if request.method == "POST":
        if len(request.FILES) > 0:
            skosfile = request.FILES.get("skosfile", None)
            imagefile = request.FILES.get("file", None)

            if imagefile:
                value = models.FileValue(
                    valueid=str(uuid.uuid4()),
                    value=request.FILES.get("file", None),
                    concept_id=conceptid,
                    valuetype_id="image",
                    language_id=lang,
                )
                value.save()
                return JSONResponse(value)

            elif skosfile:
                overwrite_options = request.POST.get("overwrite_options", None)
                staging_options = request.POST.get("staging_options", None)
                skos = SKOSReader()
                try:
                    rdf = skos.read_file(skosfile)
                    ret = skos.save_concepts_from_skos(rdf, overwrite_options, staging_options)
                    return JSONResponse(ret)
                except Exception as e:
                    return JSONErrorResponse(_('Unable to Load SKOS File'), _('There was an issue saving the contents of the file to Arches. ') + str(e))

        else:
            data = JSONDeserializer().deserialize(request.body)
            if data:
                with transaction.atomic():
                    concept = Concept(data)
                    concept.save()
                    concept.index()
                return JSONResponse(concept)

    if request.method == "DELETE":
        data = JSONDeserializer().deserialize(request.body)
        if data:
            with transaction.atomic():
                concept = Concept(data)
                delete_self = data["delete_self"] if "delete_self" in data else False
                # core concepts may never delete themselves
                if not (delete_self and concept.id in CORE_CONCEPTS):
                    if concept.nodetype == "Collection":
                        concept.delete(delete_self=delete_self)
                    else:
                        in_use = False
                        if delete_self:
                            check_concept = Concept().get(data["id"], include_subconcepts=True)
                            in_use = check_concept.check_if_concept_in_use()
                        if "subconcepts" in data:
                            for subconcept in data["subconcepts"]:
                                if in_use == False:
                                    check_concept = Concept().get(subconcept["id"], include_subconcepts=True)
                                    in_use = check_concept.check_if_concept_in_use()

                        if in_use == False:
                            concept.delete_index(delete_self=delete_self)
                            concept.delete(delete_self=delete_self)
                        else:
                            return JSONErrorResponse(_('Unable to Delete'), _('This concept or one of it\'s subconcepts is already in use by an existing resource.'), {"in_use": in_use})

            return JSONResponse(concept)

    # BUGFIX: return a response *instance*; returning the class is not a
    # valid Django response.
    return HttpResponseNotFound()
def export(request, conceptid):
    """Serialize one concept (with subconcepts and related concepts) as SKOS XML."""
    graph = Concept().get(
        id=conceptid,
        include_subconcepts=True,
        include_parentconcepts=False,
        include_relatedconcepts=True,
        depth_limit=None,
        up_depth_limit=None,
    )
    writer = SKOSWriter()
    return HttpResponse(writer.write([graph], format="pretty-xml"), content_type="application/xml")
def export_collections(request):
    """Serialize every Collection concept (non-semantic, no descendants limit) as SKOS XML."""
    graphs = [
        Concept().get(
            id=collection.pk,
            include_subconcepts=True,
            include_parentconcepts=False,
            include_relatedconcepts=False,
            depth_limit=None,
            up_depth_limit=None,
            semantic=False,
        )
        for collection in models.Concept.objects.filter(nodetype_id="Collection")
    ]
    writer = SKOSWriter()
    return HttpResponse(writer.write(graphs, format="pretty-xml"), content_type="application/xml")
def get_concept_collections(request):
    """Return the full collections tree in the requested language as JSON."""
    language = request.GET.get("lang", request.LANGUAGE_CODE)
    tree = Concept().concept_tree(mode="collections", lang=language)
    return JSONResponse(tree)
@group_required("RDM Administrator")
def make_collection(request, conceptid):
    """
    Create a Collection concept from the given concept's semantic tree and
    return it as JSON; on failure, return a JSON error response.
    """
    concept = Concept().get(id=conceptid, values=[])
    try:
        collection_concept = concept.make_collection()
        return JSONResponse({'collection': collection_concept, 'title': _('Success'), 'message': _('Collection successfully created from the selected concept')})
    # BUGFIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are not swallowed by the error response.
    except Exception:
        return JSONErrorResponse(_('Unable to Make Collection'), _('Unable to make a collection from the selected concept.'))
@group_required("RDM Administrator")
def manage_parents(request, conceptid):
    """
    Update the parent relations of a concept in one transaction: removed
    parents are deleted, new parents are added, reindexing after each change.
    POST only.
    """
    if request.method == "POST":
        json = request.body
        if json is not None:
            data = JSONDeserializer().deserialize(json)

            with transaction.atomic():
                if len(data["deleted"]) > 0:
                    # stage the removed parents on the concept, then delete those relations
                    concept = Concept().get(id=conceptid, include=None)
                    for deleted in data["deleted"]:
                        concept.addparent(deleted)

                    concept.delete()
                    concept.bulk_index()

                if len(data["added"]) > 0:
                    concept = Concept().get(id=conceptid)
                    for added in data["added"]:
                        concept.addparent(added)

                    concept.save()
                    concept.bulk_index()

            return JSONResponse(data)

    else:
        return HttpResponseNotAllowed(["POST"])

    return HttpResponseNotFound()
def confirm_delete(request, conceptid):
    """Render an HTML list of every concept that deleting `conceptid` would remove."""
    lang = request.GET.get("lang", request.LANGUAGE_CODE)
    concept = Concept().get(id=conceptid)
    labels = [
        c.get_preflabel(lang=lang).value for key, c in Concept.gather_concepts_to_delete(concept, lang=lang).items()
    ]
    # return HttpResponse('<div>Showing only 50 of
    # %s concepts</div><ul><li>%s</ul>' % (len(concepts_to_delete), '<li>'.join(concepts_to_delete[:50]) + ''))
    return HttpResponse("<ul><li>%s</ul>" % ("<li>".join(labels) + ""))
def dropdown(request):
    """Return the e55 domain graph for a concept as JSON (select2-ready)."""
    domain = Concept().get_e55_domain(request.GET.get("conceptid"))
    return JSONResponse(domain)
def paged_dropdown(request):
    """
    Paged (select2) listing of a concept's child collections, optionally
    filtered by a search string. For page 1, if no exact label match appears
    near the top of the results, look one up and pin it first so users don't
    have to scroll. See: https://github.com/archesproject/arches/issues/8355
    """
    conceptid = request.GET.get("conceptid")
    query = request.GET.get("query", "")
    page = int(request.GET.get("page", 1))
    limit = 50
    offset = (page - 1) * limit

    results = Concept().get_child_collections_hierarchically(conceptid, offset=offset, limit=limit, query=query)
    total_count = results[0][3] if len(results) > 0 else 0

    data = [dict(list(zip(["valueto", "depth", "collector"], d))) for d in results]
    data = [
        dict(list(zip(["id", "text", "conceptid", "language", "type"], d["valueto"].values())), depth=d["depth"], collector=d["collector"])
        for d in data
    ]

    try:
        if page == 1:
            found = False
            for i, d in enumerate(data):
                if i <= 7 and d["text"].lower() == query.lower():
                    found = True
                    break
            if not found:
                languageid = get_language().lower()
                cursor = connection.cursor()
                # SECURITY: the user-supplied query and the language values are
                # bound as query parameters -- never formatted into the SQL
                # text -- to prevent SQL injection.
                cursor.execute(
                    """
                    SELECT value, valueid
                    FROM
                    (
                        SELECT *, CASE WHEN LOWER(languageid) = %(languageid)s THEN 10
                                WHEN LOWER(languageid) like %(short_languageid)s THEN 5
                                ELSE 0
                                END score
                        FROM values
                    ) as vals
                    WHERE LOWER(value)=%(query)s AND score > 0
                    AND valuetype in ('prefLabel')
                    ORDER BY score desc limit 1
                    """,
                    {"languageid": languageid, "short_languageid": languageid.split("-")[0] + "%", "query": query.lower()},
                )
                rows = cursor.fetchall()
                if len(rows) == 1:
                    data.insert(0, {"id": str(rows[0][1]), "text": rows[0][0], "depth": 1, "collector": False})
    except Exception:
        # best-effort: the pinned exact match is cosmetic, never fail the page
        pass

    return JSONResponse({"results": data, "more": offset + limit < total_count})
def get_pref_label(request):
    """Return the preferred label for a value id as JSON."""
    preflabel = get_preflabel_from_valueid(request.GET.get("valueid"), request.LANGUAGE_CODE)
    return JSONResponse(preflabel)
def search(request):
    """
    Prefix-phrase search over concept labels; optionally excludes the subtree
    rooted at `removechildren`. Each hit is annotated with the name of the
    scheme (top concept) it belongs to, caching scheme-name lookups per call.
    """
    searchString = request.GET["q"]
    removechildren = request.GET.get("removechildren", None)
    query = Query(se, start=0, limit=100)
    phrase = Match(field="value", query=searchString.lower(), type="phrase_prefix")
    query.add_query(phrase)
    results = query.search(index=CONCEPTS_INDEX)

    ids = []
    if removechildren is not None:
        # drop the subtree rooted at `removechildren` (and itself) from the hits
        ids = [concept[0] for concept in Concept().get_child_concepts(removechildren, columns="conceptidto::text")]
        ids.append(removechildren)

    newresults = []
    cached_scheme_names = {}
    for result in results["hits"]["hits"]:
        if result["_source"]["conceptid"] not in ids:
            # first look to see if we've already retrieved the top concept name
            # else look up the top concept name with ES and cache the result
            top_concept = result["_source"]["top_concept"]
            if top_concept in cached_scheme_names:
                result["in_scheme_name"] = cached_scheme_names[top_concept]
            else:
                query = Query(se, start=0, limit=100)
                phrase = Match(field="conceptid", query=top_concept, type="phrase")
                query.add_query(phrase)
                scheme = query.search(index=CONCEPTS_INDEX)
                for label in scheme["hits"]["hits"]:
                    if label["_source"]["type"] == "prefLabel":
                        cached_scheme_names[top_concept] = label["_source"]["value"]
                        result["in_scheme_name"] = label["_source"]["value"]

            newresults.append(result)

    # NOTE: earlier db-backed and relation-crawl implementations of the scheme
    # lookup were kept here as commented-out code; removed for readability
    # (see VCS history).

    results["hits"]["hits"] = newresults
    return JSONResponse(results)
def add_concepts_from_sparql_endpoint(request, conceptid):
    """
    Import concepts fetched from a configured SPARQL endpoint and attach them
    under the concept identified by `conceptid`. POST only; body carries the
    endpoint, the remote concept ids, and the parent's nodetype.
    """
    if request.method == "POST":
        json = request.body
        if json is not None:
            data = JSONDeserializer().deserialize(json)

            parentconcept = Concept({"id": conceptid, "nodetype": data["model"]["nodetype"]})

            # a Concept gains 'narrower' children; a ConceptScheme gains top concepts
            if parentconcept.nodetype == "Concept":
                relationshiptype = "narrower"
            elif parentconcept.nodetype == "ConceptScheme":
                relationshiptype = "hasTopConcept"

            provider = get_sparql_providers(data["endpoint"])
            try:
                parentconcept.subconcepts = provider.get_concepts(data["ids"])
            except Exception as e:
                # BUGFIX: Python 3 exceptions have no `.message` attribute
                return HttpResponseServerError(str(e))

            for subconcept in parentconcept.subconcepts:
                subconcept.relationshiptype = relationshiptype

            parentconcept.save()
            parentconcept.index()

            return JSONResponse(parentconcept, indent=4)

    else:
        return HttpResponseNotAllowed(["POST"])

    return HttpResponseNotFound()
def search_sparql_endpoint_for_concepts(request):
    """Proxy a term search to the selected SPARQL endpoint provider."""
    endpoint_provider = get_sparql_providers(request.GET.get("endpoint"))
    hits = endpoint_provider.search_for_concepts(request.GET.get("terms"))
    return JSONResponse(hits)
def concept_tree(request, mode):
    """Return the concept tree rooted at the requested node as indented JSON."""
    language = request.GET.get("lang", request.LANGUAGE_CODE)
    root_id = request.GET.get("node", None)
    tree = Concept({"id": root_id}).concept_tree(lang=language, mode=mode)
    return JSONResponse(tree, indent=4)
def concept_value(request):
    """Read or delete a single concept value.

    DELETE: expects a serialized ConceptValue in the request body; removes it
    from the search index and the database inside one transaction, returning
    the deleted value as JSON.
    GET: expects a ``valueid`` query parameter; returns the matching Value row.
    Any other method (or an empty DELETE body) falls through to a 404.
    """
    if request.method == "DELETE":
        data = JSONDeserializer().deserialize(request.body)
        if data:
            # Index removal and row deletion must succeed or fail together.
            with transaction.atomic():
                value = ConceptValue(data)
                value.delete_index()
                value.delete()
                return JSONResponse(value)
    if request.method == "GET":
        valueid = request.GET.get("valueid")
        value = models.Value.objects.get(pk=valueid)
        return JSONResponse(value)
    # BUGFIX: the class itself was returned instead of an instance; Django needs
    # an actual HttpResponse object, so call the constructor.
    return HttpResponseNotFound()
|
GHSA-gmpq-xrxj-xh8m
|
cps/helper.py
| "@@ -734,10 +734,10 @@ def save_cover_from_url(url, book_path):\n if not cli.allow_localhost(...TRUNCATED) | "# -*- coding: utf-8 -*-\n\n# This file is part of the Calibre-Web (https://github.com/janeczku/cal(...TRUNCATED) |
GHSA-2647-c639-qv2j
|
src/werkzeug/_internal.py
| "@@ -34,7 +34,7 @@\n _legal_cookie_chars_re = rb\"[\\w\\d!#%&\\'~_`><@,:/\\$\\*\\+\\-\\.\\^\\|\\)\\((...TRUNCATED) | "import logging\nimport operator\nimport re\nimport string\nimport sys\nimport typing\nimport typing(...TRUNCATED) |
GHSA-px8h-6qxv-m22q
|
src/werkzeug/sansio/http.py
| "@@ -126,10 +126,6 @@ def parse_cookie(\n def _parse_pairs() -> t.Iterator[t.Tuple[str, str]]:\n(...TRUNCATED) | "import re\nimport typing as t\nfrom datetime import datetime\n\nfrom .._internal import _cookie_par(...TRUNCATED) |
GHSA-px8h-6qxv-m22q
|
tests/test_http.py
| "@@ -412,7 +412,8 @@ def test_is_resource_modified_for_range_requests(self):\n def test_parse_co(...TRUNCATED) | "import base64\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import timed(...TRUNCATED) |
GHSA-px8h-6qxv-m22q
|
tests/compiler/ir/test_optimize_ir.py
| "@@ -143,7 +143,9 @@\n ([\"sub\", \"x\", 0], [\"x\"]),\n ([\"sub\", \"x\", \"x\"], [0]),\n (...TRUNCATED) | "import pytest\n\nfrom vyper.codegen.ir_node import IRnode\nfrom vyper.exceptions import StaticAsser(...TRUNCATED) |
GHSA-c647-pxm2-c52w
|
tests/parser/functions/test_create_functions.py
| "@@ -431,3 +431,212 @@ def test2(target: address, salt: bytes32) -> address:\n # test2 = c.test2(...TRUNCATED) | "import pytest\nimport rlp\nfrom eth.codecs import abi\nfrom hexbytes import HexBytes\n\nimport vype(...TRUNCATED) |
GHSA-c647-pxm2-c52w
|
tests/parser/functions/test_raw_call.py
| "@@ -426,6 +426,164 @@ def baz(_addr: address, should_raise: bool) -> uint256:\n assert caller.b(...TRUNCATED) | "import pytest\nfrom hexbytes import HexBytes\n\nfrom vyper import compile_code\nfrom vyper.builtins(...TRUNCATED) |
GHSA-c647-pxm2-c52w
|
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 5