Dataset Viewer

Columns: repo (string), instance_id (string), base_commit (string), patch (string), test_patch (string), problem_statement (string), hints_text (string), created_at (string), version (string), FAIL_TO_PASS (sequence), PASS_TO_PASS (sequence), pull_number (string), issue_numbers (string)
repo: codingedward/flask-sieve
instance_id: codingedward__flask-sieve-20
base_commit: 2a8e2851c2a2f91a73f9d494883a867469af3f26

patch:
diff --git a/flask_sieve/conditional_inclusion_rules.py b/flask_sieve/conditional_inclusion_rules.py
new file mode 100644
index 0000000..99001ba
--- /dev/null
+++ b/flask_sieve/conditional_inclusion_rules.py
@@ -0,0 +1,8 @@
+conditional_inclusion_rules = [
+ 'required_if',
+ 'required_unless',
+ 'required_with',
+ 'required_with_all',
+ 'required_without',
+ 'required_without_all',
+]
diff --git a/flask_sieve/rules_processor.py b/flask_sieve/rules_processor.py
index c6e4f7e..d6f58b2 100644
--- a/flask_sieve/rules_processor.py
+++ b/flask_sieve/rules_processor.py
@@ -13,6 +13,7 @@ from PIL import Image
from dateutil.parser import parse as dateparse
from werkzeug.datastructures import FileStorage
+from .conditional_inclusion_rules import conditional_inclusion_rules
class RulesProcessor:
def __init__(self, app=None, rules=None, request=None):
@@ -36,24 +37,27 @@ class RulesProcessor:
self._attributes_validations = {}
for attribute, rules in self._rules.items():
should_bail = self._has_rule(rules, 'bail')
- nullable = self._has_rule(rules, 'nullable')
validations = []
for rule in rules:
+ is_valid = False
handler = self._get_rule_handler(rule['name'])
value = self._attribute_value(attribute)
attr_type = self._get_type(value, rules)
- is_valid = False
- if value is None and nullable:
+ is_nullable = self._is_attribute_nullable(
+ attribute=attribute,
+ params=rule['params'],
+ rules=rules,
+ )
+ if value is None and is_nullable:
is_valid = True
else:
is_valid = handler(
value=value,
attribute=attribute,
params=rule['params'],
- nullable=nullable,
+ nullable=is_nullable,
rules=rules
)
-
validations.append({
'attribute': attribute,
'rule': rule['name'],
@@ -523,6 +527,29 @@ class RulesProcessor:
r'^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$',
str(value).lower()
) is not None
+
+ def _is_attribute_nullable(self, attribute, params, rules, **kwargs):
+ is_explicitly_nullable = self._has_rule(rules, 'nullable')
+ if is_explicitly_nullable:
+ return True
+ value = self._attribute_value(attribute)
+ if value is not None:
+ return False
+ attribute_conditional_rules = list(filter(lambda rule: rule['name'] in conditional_inclusion_rules, rules))
+ if len(attribute_conditional_rules) == 0:
+ return False
+ for conditional_rule in attribute_conditional_rules:
+ handler = self._get_rule_handler(conditional_rule['name'])
+ is_conditional_rule_valid = handler(
+ value=value,
+ attribute=attribute,
+ params=conditional_rule['params'],
+ nullable=False,
+ rules=rules
+ )
+ if not is_conditional_rule_valid:
+ return False
+ return True
@staticmethod
def _compare_dates(first, second, comparator):
@@ -628,4 +655,3 @@ class RulesProcessor:
'Cannot call method %s with value %s' %
(method.__name__, str(value))
)
-
diff --git a/watch.sh b/watch.sh
new file mode 100755
index 0000000..340fb07
--- /dev/null
+++ b/watch.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+watchman-make -p "**/*.py" --run "nosetests --with-coverage --cover-package flask_sieve"

test_patch:
diff --git a/tests/test_rules_processor.py b/tests/test_rules_processor.py
index abbf64b..167145c 100644
--- a/tests/test_rules_processor.py
+++ b/tests/test_rules_processor.py
@@ -696,8 +696,8 @@ class TestRulesProcessor(unittest.TestCase):
request={'field': '', 'field_2': 'three'}
)
self.assert_passes(
- rules={'field': ['size:0']},
- request={'field': self.image_file}
+ rules={'field': ['required_if:field_2,one,two', 'integer']},
+ request={'field_1': '', 'field_2': 'xxxx'}
)
self.assert_fails(
rules={'field': ['required_if:field_2,one,two']},
@@ -714,8 +714,8 @@ class TestRulesProcessor(unittest.TestCase):
request={'field': '', 'field_2': 'one'}
)
self.assert_fails(
- rules={'field': ['required_unless:field_2,one,two']},
- request={'field': '', 'field_2': 'three'}
+ rules={'field': ['required_unless:field_2,one,two', 'string']},
+ request={'field_2': 'three'}
)
def test_validates_required_with(self):
@@ -763,6 +763,43 @@ class TestRulesProcessor(unittest.TestCase):
rules={'field': ['required_without:field_2,field_3']},
request={'field': '', 'field_2': ''}
)
+ self.assert_passes(
+ rules={
+ 'id': ['required_without:name', 'integer'],
+ 'name': ['required_without:id', 'string', 'confirmed']
+ },
+ request={'id': 123}
+ )
+
+ def test_validates_required_multiple_required_withouts(self):
+ self.assert_passes(
+ rules={
+ 'id': ['required_without:name', 'integer'],
+ 'name': ['required_without:id', 'string'],
+ },
+ request={'id': 1, 'name': ''}
+ )
+ self.assert_passes(
+ rules={
+ 'id': ['required_without:name', 'integer'],
+ 'name': ['required_without:id', 'string', 'nullable'],
+ },
+ request={'id': 1},
+ )
+ self.assert_passes(
+ rules={
+ 'id': ['required_without:name', 'integer'],
+ 'id2': ['required_without:id', 'integer'],
+ },
+ request={'id': 1}
+ )
+ self.assert_fails(
+ rules={
+ 'id': ['required_without:name', 'integer'],
+ 'id2': ['required_without:id', 'integer'],
+ },
+ request={'name': 'hi'}
+ )
def test_validates_required_without_all(self):
self.assert_passes(
@@ -787,6 +824,10 @@ class TestRulesProcessor(unittest.TestCase):
rules={'field': ['same:field_2']},
request={'field': 1, 'field_2': 1}
)
+ self.assert_fails(
+ rules={'field': ['same:field_2']},
+ request={'field': '1', 'field_2': 1}
+ )
self.assert_fails(
rules={'field': ['same:field_2']},
request={'field': 1, 'field_2': 2}

problem_statement:
validation error
Hello
I think I found a bug with `required_without` and `required_without_all` when they are used together with a type rule (such as `integer` or `string`).
Example:
```
def rules(self):
return {
'id': ['required_without:name', 'integer'],
'name': ['required_without:id', 'string']
}
```
and when the request JSON only contains `id`:
```
{
"id": 123
}
```
the error response is:
```
{
"errors": {
"user_id": [
"The name must be an string."
]
},
"message": "Validation error",
"success": false
}
```
Can you please check this issue?
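The patch above addresses this by only skipping type rules on a missing value when every conditional inclusion rule attached to the attribute is already satisfied. A minimal sketch of that idea, with simplified names that are not flask-sieve's actual API:

```python
# Condensed restatement of the new _is_attribute_nullable logic from the
# patch; names and signatures are simplified for illustration.
CONDITIONAL_INCLUSION_RULES = {
    'required_if', 'required_unless', 'required_with',
    'required_with_all', 'required_without', 'required_without_all',
}

def is_attribute_nullable(value, rule_names, rule_passes):
    """rule_passes(name) -> bool evaluates a single conditional rule."""
    if value is not None:
        return False
    conditional = [r for r in rule_names if r in CONDITIONAL_INCLUSION_RULES]
    if not conditional:
        return False
    # The attribute may stay empty only when all conditional rules pass.
    return all(rule_passes(r) for r in conditional)

# 'name' is absent but 'id' is present, so required_without:id is satisfied
# and the 'string' type check on 'name' should be skipped:
print(is_attribute_nullable(None, ['required_without', 'string'],
                            lambda rule: True))  # True
```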

created_at: 2020-12-05 23:20:04+00:00
version: -1.0
FAIL_TO_PASS: [
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_required_if",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_required_multiple_required_withouts",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_required_without"
]
PASS_TO_PASS: [
"tests/test_rules_processor.py::TestRulesProcessor::test_allows_nullable_fields",
"tests/test_rules_processor.py::TestRulesProcessor::test_assert_params_size",
"tests/test_rules_processor.py::TestRulesProcessor::test_compare_dates",
"tests/test_rules_processor.py::TestRulesProcessor::test_custom_handlers",
"tests/test_rules_processor.py::TestRulesProcessor::test_get_rule_handler",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_accepted",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_active_url",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_after",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_after_or_equal",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_alpha",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_alpha_dash",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_alpha_num",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_array",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_before",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_before_or_equal",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_between",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_boolean",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_confirmed",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_date",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_date_equals",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_different",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_digits",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_digits_between",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_dimensions",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_distinct",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_email",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_exists",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_extension",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_file",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_filled",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_gt",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_gte",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_image",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_in",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_in_array",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_integer",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_ip",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_ipv4",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_ipv6",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_json",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_lt",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_lte",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_max",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_mime_types",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_min",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_not_in",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_not_regex",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_numeric",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_present",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_regex",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_required",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_required_unless",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_required_with",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_required_with_all",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_required_without_all",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_same",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_size",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_starts_with",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_string",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_timezone",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_unique",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_url",
"tests/test_rules_processor.py::TestRulesProcessor::test_validates_uuid"
]

repo: adrienverge/yamllint
instance_id: adrienverge__yamllint-582
base_commit: 7f2c0715456939ac6196d8f457c6c540086f3608

patch:
diff --git a/yamllint/rules/document_end.py b/yamllint/rules/document_end.py
index 3fc14ad9..2337484c 100644
--- a/yamllint/rules/document_end.py
+++ b/yamllint/rules/document_end.py
@@ -99,11 +99,13 @@ def check(conf, token, prev, next, nextnext, context):
prev_is_end_or_stream_start = isinstance(
prev, (yaml.DocumentEndToken, yaml.StreamStartToken)
)
+ prev_is_directive = isinstance(prev, yaml.DirectiveToken)
if is_stream_end and not prev_is_end_or_stream_start:
yield LintProblem(token.start_mark.line, 1,
'missing document end "..."')
- elif is_start and not prev_is_end_or_stream_start:
+ elif is_start and not (prev_is_end_or_stream_start
+ or prev_is_directive):
yield LintProblem(token.start_mark.line + 1, 1,
'missing document end "..."')

test_patch:
diff --git a/tests/rules/test_document_end.py b/tests/rules/test_document_end.py
index 927d0a75..8340c6f8 100644
--- a/tests/rules/test_document_end.py
+++ b/tests/rules/test_document_end.py
@@ -71,3 +71,22 @@ def test_multiple_documents(self):
'---\n'
'third: document\n'
'...\n', conf, problem=(6, 1))
+
+ def test_directives(self):
+ conf = 'document-end: {present: true}'
+ self.check('%YAML 1.2\n'
+ '---\n'
+ 'document: end\n'
+ '...\n', conf)
+ self.check('%YAML 1.2\n'
+ '%TAG ! tag:clarkevans.com,2002:\n'
+ '---\n'
+ 'document: end\n'
+ '...\n', conf)
+ self.check('---\n'
+ 'first: document\n'
+ '...\n'
+ '%YAML 1.2\n'
+ '---\n'
+ 'second: document\n'
+ '...\n', conf)

problem_statement:
[BUG] False positive "missing document end" error when directive (%) is present
Consider the following yaml:
```
%YAML 1.2
---
hello: world!
...
```
With `document-end: enable` you get `2:1 error missing document end "..." (document-end)`.
That's because the current token (`---`) is `DocumentStartToken` and the previous token (`%YAML 1.2`) is `DirectiveToken`, but yamllint [believes](https://github.com/adrienverge/yamllint/blob/b92fc9cb3133673f284ccdef76446673bd81c7a8/yamllint/rules/document_end.py#L106) that the accepted previous tokens are [`DocumentEndToken` or `StreamStartToken`](https://github.com/adrienverge/yamllint/blob/b92fc9cb3133673f284ccdef76446673bd81c7a8/yamllint/rules/document_end.py#L100) only.
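The token sequence is easy to verify directly with PyYAML, which yamllint uses for scanning; a quick diagnostic sketch, not part of the fix:

```python
import yaml

document = "%YAML 1.2\n---\nhello: world!\n...\n"
# Print the token stream: the token preceding DocumentStartToken is a
# DirectiveToken, which the old check does not accept.
for token in yaml.scan(document):
    print(type(token).__name__)
# StreamStartToken, DirectiveToken, DocumentStartToken, ...
```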

hints_text:
Hello Andrei, thanks for the bug report! I can confirm the problem with yamllint 1.30.0.
Contributions are welcome :+1: (a fix in `yamllint/rules/document_end.py` + a non-regression test in `tests/rules/test_document_end.py`)
Hey @adrienverge, I'd like to work on this issue if I get some more information on it.
@tarannum-2002 All the information is already there:
```
$ cat << EOF > foobar.yaml
%YAML 1.2
---
hello: world!
...
EOF
$
$ cat <<EOF > config.yaml
rules:
document-end: enable
EOF
$
$ yamllint -c config.yaml foobar.yaml
foobar.yaml
2:1 error missing document end "..." (document-end)
$
```

created_at: 2023-06-24T21:48:28.000
version: -1.0
FAIL_TO_PASS: [
"tests/rules/test_document_end.py::DocumentEndTestCase::test_directives"
]
PASS_TO_PASS: [
"tests/rules/test_document_end.py::DocumentEndTestCase::test_forbidden",
"tests/rules/test_document_end.py::DocumentEndTestCase::test_disabled",
"tests/rules/test_document_end.py::DocumentEndTestCase::test_multiple_documents",
"tests/rules/test_document_end.py::DocumentEndTestCase::test_required"
]
pull_number: 582
issue_numbers: "['560']"

repo: streamlink/streamlink
instance_id: streamlink__streamlink-1070
base_commit: 4761570f479ba51ffeb099a4e8a2ed3fea6df72d

patch:
diff --git a/src/streamlink/plugins/tvplayer.py b/src/streamlink/plugins/tvplayer.py
index 249e85fa..f79474e6 100644
--- a/src/streamlink/plugins/tvplayer.py
+++ b/src/streamlink/plugins/tvplayer.py
@@ -15,7 +15,7 @@ class TVPlayer(Plugin):
dummy_postcode = "SE1 9LT" # location of ITV HQ in London
url_re = re.compile(r"https?://(?:www.)?tvplayer.com/(:?watch/?|watch/(.+)?)")
- stream_attrs_re = re.compile(r'data-(resource|token)\s*=\s*"(.*?)"', re.S)
+ stream_attrs_re = re.compile(r'data-(resource|token|channel-id)\s*=\s*"(.*?)"', re.S)
login_token_re = re.compile(r'input.*?name="token".*?value="(\w+)"')
stream_schema = validate.Schema({
"tvplayer": validate.Schema({
@@ -58,20 +58,22 @@ class TVPlayer(Plugin):
# there is a 302 redirect on a successful login
return res2.status_code == 302
- def _get_stream_data(self, resource, token, service=1):
+ def _get_stream_data(self, resource, channel_id, token, service=1):
# Get the context info (validation token and platform)
self.logger.debug("Getting stream information for resource={0}".format(resource))
context_res = http.get(self.context_url, params={"resource": resource,
"gen": token})
context_data = http.json(context_res, schema=self.context_schema)
+ self.logger.debug("Context data: {0}", str(context_data))
# get the stream urls
res = http.post(self.api_url, data=dict(
service=service,
- id=resource,
+ id=channel_id,
validate=context_data["validate"],
token=context_data.get("token"),
- platform=context_data["platform"]["key"]))
+ platform=context_data["platform"]["key"]),
+ raise_for_status=False)
return http.json(res, schema=self.stream_schema)
@@ -91,7 +93,8 @@ class TVPlayer(Plugin):
data=dict(postcode=self.dummy_postcode),
params=dict(return_url=self.url))
- stream_attrs = dict((k, v.strip('"')) for k, v in self.stream_attrs_re.findall(res.text))
+ stream_attrs = dict((k.replace("-", "_"), v.strip('"')) for k, v in self.stream_attrs_re.findall(res.text))
+ self.logger.debug("Got stream attrs: {0}", str(stream_attrs))
if "resource" in stream_attrs and "token" in stream_attrs:
stream_data = self._get_stream_data(**stream_attrs)

test_patch:
diff --git a/tests/test_plugin_tvplayer.py b/tests/test_plugin_tvplayer.py
index 52f27dc0..f9f13367 100644
--- a/tests/test_plugin_tvplayer.py
+++ b/tests/test_plugin_tvplayer.py
@@ -41,7 +41,7 @@ class TestPluginTVPlayer(unittest.TestCase):
page_resp = Mock()
page_resp.text = u"""
<div class="video-js theoplayer-skin theo-seekbar-above-controls content-box vjs-fluid"
- data-resource= "89"
+ data-resource= "bbcone"
data-token = "1324567894561268987948596154656418448489159"
data-content-type="live"
data-environment="live"
@@ -54,6 +54,7 @@ class TestPluginTVPlayer(unittest.TestCase):
mock_http.get.return_value = page_resp
hlsstream.parse_variant_playlist.return_value = {"test": HLSStream(self.session, "http://test.se/stream1")}
+ TVPlayer.bind(self.session, "test.plugin.tvplayer")
plugin = TVPlayer("http://tvplayer.com/watch/dave")
streams = plugin.get_streams()
@@ -63,7 +64,7 @@ class TestPluginTVPlayer(unittest.TestCase):
# test the url is used correctly
mock_http.get.assert_called_with("http://tvplayer.com/watch/dave")
# test that the correct API call is made
- mock_get_stream_data.assert_called_with(resource="89", token="1324567894561268987948596154656418448489159")
+ mock_get_stream_data.assert_called_with(resource="bbcone", channel_id="89", token="1324567894561268987948596154656418448489159")
# test that the correct URL is used for the HLSStream
hlsstream.parse_variant_playlist.assert_called_with(ANY, "http://test.se/stream1")
@@ -76,6 +77,7 @@ class TestPluginTVPlayer(unittest.TestCase):
"""
mock_http.get.return_value = page_resp
+ TVPlayer.bind(self.session, "test.plugin.tvplayer")
plugin = TVPlayer("http://tvplayer.com/watch/dave")
streams = plugin.get_streams()

problem_statement:
tvplayer plugin broken
https://tvplayer.com/watch/bbcone
Unable to open URL: http://api.tvplayer.com/api/v2/stream/live (400 Client Error: Bad Request for url: http://api.tvplayer.com/api/v2/stream/live)

created_at: 2017-07-06 16:07:35+00:00
version: -1.0
FAIL_TO_PASS: [
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_get_streams"
]
PASS_TO_PASS: [
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_can_handle_url",
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_get_invalid_page"
]
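To illustrate the streamlink patch above: the widened regex also captures `data-channel-id`, and the key normalization replaces the dash with an underscore so the dict can be unpacked as keyword arguments. A standalone sketch, with HTML adapted from the updated test fixture:

```python
import re

stream_attrs_re = re.compile(r'data-(resource|token|channel-id)\s*=\s*"(.*?)"', re.S)
html = '''
<div data-resource= "bbcone"
     data-token = "1324567894561268987948596154656418448489159"
     data-channel-id= "89">
'''
# "channel-id" becomes "channel_id" so it is valid as a keyword argument
# (the plugin calls _get_stream_data(**stream_attrs)).
stream_attrs = dict(
    (k.replace("-", "_"), v.strip('"'))
    for k, v in stream_attrs_re.findall(html)
)
print(stream_attrs)
# {'resource': 'bbcone', 'token': '13245...', 'channel_id': '89'}
```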

repo: PyCQA/flake8
instance_id: PyCQA__flake8-1624
base_commit: fbb33430e6e0b326744e2e703db77c4773de93de

patch:
diff --git a/src/flake8/main/application.py b/src/flake8/main/application.py
index 13ece4e..5178abb 100644
--- a/src/flake8/main/application.py
+++ b/src/flake8/main/application.py
@@ -132,6 +132,7 @@ class Application:
version=flake8.__version__,
plugin_versions=self.plugins.versions_str(),
parents=[self.prelim_arg_parser],
+ formatter_names=list(self.plugins.reporters),
)
options.register_default_options(self.option_manager)
self.option_manager.register_plugins(self.plugins)
diff --git a/src/flake8/main/options.py b/src/flake8/main/options.py
index 9b374ab..86a6cf8 100644
--- a/src/flake8/main/options.py
+++ b/src/flake8/main/options.py
@@ -220,7 +220,15 @@ def register_default_options(option_manager: OptionManager) -> None:
metavar="format",
default="default",
parse_from_config=True,
- help="Format errors according to the chosen formatter.",
+ help=(
+ f"Format errors according to the chosen formatter "
+ f"({', '.join(sorted(option_manager.formatter_names))}) "
+ f"or a format string containing %%-style "
+ f"mapping keys (code, col, path, row, text). "
+ f"For example, "
+ f"``--format=pylint`` or ``--format='%%(path)s %%(code)s'``. "
+ f"(Default: %(default)s)"
+ ),
)
add_option(
diff --git a/src/flake8/options/manager.py b/src/flake8/options/manager.py
index e333c9e..7c40cb9 100644
--- a/src/flake8/options/manager.py
+++ b/src/flake8/options/manager.py
@@ -317,6 +317,7 @@ class OptionManager:
version: str,
plugin_versions: str,
parents: list[argparse.ArgumentParser],
+ formatter_names: list[str],
) -> None:
"""Initialize an instance of an OptionManager.
@@ -330,6 +331,7 @@ class OptionManager:
A list of ArgumentParser objects whose arguments should also be
included.
"""
+ self.formatter_names = formatter_names
self.parser = argparse.ArgumentParser(
prog="flake8",
usage="%(prog)s [options] file file ...",

test_patch:
diff --git a/tests/integration/test_aggregator.py b/tests/integration/test_aggregator.py
index a5b39d7..006ac5f 100644
--- a/tests/integration/test_aggregator.py
+++ b/tests/integration/test_aggregator.py
@@ -18,6 +18,7 @@ def optmanager():
version="3.0.0",
plugin_versions="",
parents=[],
+ formatter_names=[],
)
options.register_default_options(option_manager)
return option_manager
diff --git a/tests/integration/test_main.py b/tests/integration/test_main.py
index e711fb3..dfa0e0b 100644
--- a/tests/integration/test_main.py
+++ b/tests/integration/test_main.py
@@ -406,3 +406,13 @@ The specified config file does not exist: missing.cfg
out, err = capsys.readouterr()
assert out == expected
assert err == ""
+
+
+def test_format_option_help(capsys):
+ """Test that help displays list of available formatters."""
+ with pytest.raises(SystemExit):
+ cli.main(["--help"])
+
+ out, err = capsys.readouterr()
+ assert "(default, pylint, quiet-filename, quiet-nothing)" in out
+ assert err == ""
diff --git a/tests/integration/test_plugins.py b/tests/integration/test_plugins.py
index 0b4424a..edba048 100644
--- a/tests/integration/test_plugins.py
+++ b/tests/integration/test_plugins.py
@@ -100,6 +100,7 @@ def test_local_plugin_can_add_option(local_config):
version="123",
plugin_versions="",
parents=[stage1_parser],
+ formatter_names=[],
)
register_default_options(option_manager)
option_manager.register_plugins(loaded_plugins)
diff --git a/tests/unit/test_option_manager.py b/tests/unit/test_option_manager.py
index d5b88c3..3d3ddc1 100644
--- a/tests/unit/test_option_manager.py
+++ b/tests/unit/test_option_manager.py
@@ -17,7 +17,10 @@ TEST_VERSION = "3.0.0b1"
def optmanager():
"""Generate a simple OptionManager with default test arguments."""
return manager.OptionManager(
- version=TEST_VERSION, plugin_versions="", parents=[]
+ version=TEST_VERSION,
+ plugin_versions="",
+ parents=[],
+ formatter_names=[],
)
@@ -34,7 +37,10 @@ def test_option_manager_including_parent_options():
# WHEN
optmanager = manager.OptionManager(
- version=TEST_VERSION, plugin_versions="", parents=[parent_parser]
+ version=TEST_VERSION,
+ plugin_versions="",
+ parents=[parent_parser],
+ formatter_names=[],
)
options = optmanager.parse_args(["--parent", "foo"])
diff --git a/tests/unit/test_options_config.py b/tests/unit/test_options_config.py
index 0890ea9..8c8f0cb 100644
--- a/tests/unit/test_options_config.py
+++ b/tests/unit/test_options_config.py
@@ -168,7 +168,9 @@ def test_load_extra_config_utf8(tmpdir):
@pytest.fixture
def opt_manager():
- ret = OptionManager(version="123", plugin_versions="", parents=[])
+ ret = OptionManager(
+ version="123", plugin_versions="", parents=[], formatter_names=[]
+ )
register_default_options(ret)
return ret

problem_statement:
Display list of available formatters with help for --format
In GitLab by @blueyed on Apr 26, 2018, 03:32
`flake8 --help` contains:
> --format=format Format errors according to the chosen formatter.
It would be nice/helpful if the list of available formatters were listed there.

created_at: 2022-07-28 20:48:41+00:00
version: -1.0
FAIL_TO_PASS: [
"tests/integration/test_aggregator.py::test_aggregate_options_with_config",
"tests/integration/test_aggregator.py::test_aggregate_options_when_isolated",
"tests/integration/test_main.py::test_format_option_help",
"tests/integration/test_plugins.py::test_local_plugin_can_add_option",
"tests/unit/test_option_manager.py::test_option_manager_creates_option_parser",
"tests/unit/test_option_manager.py::test_option_manager_including_parent_options",
"tests/unit/test_option_manager.py::test_parse_args_forwarding_default_values",
"tests/unit/test_option_manager.py::test_parse_args_forwarding_type_coercion",
"tests/unit/test_option_manager.py::test_add_option_short_option_only",
"tests/unit/test_option_manager.py::test_add_option_long_option_only",
"tests/unit/test_option_manager.py::test_add_short_and_long_option_names",
"tests/unit/test_option_manager.py::test_add_option_with_custom_args",
"tests/unit/test_option_manager.py::test_parse_args_normalize_path",
"tests/unit/test_option_manager.py::test_parse_args_handles_comma_separated_defaults",
"tests/unit/test_option_manager.py::test_parse_args_handles_comma_separated_lists",
"tests/unit/test_option_manager.py::test_parse_args_normalize_paths",
"tests/unit/test_option_manager.py::test_extend_default_ignore",
"tests/unit/test_option_manager.py::test_optparse_normalize_callback_option_legacy",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[int-5-5]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[long-6-6]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[string-foo-foo]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[float-1.5-1.5]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[complex-1+5j-(1+5j)]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[str-foo-foo]",
"tests/unit/test_option_manager.py::test_optparse_normalize_choice_type",
"tests/unit/test_option_manager.py::test_optparse_normalize_help",
"tests/unit/test_option_manager.py::test_parse_invalid_jobs_argument",
"tests/unit/test_options_config.py::test_parse_config_no_values",
"tests/unit/test_options_config.py::test_parse_config_typed_values",
"tests/unit/test_options_config.py::test_parse_config_ignores_unknowns"
]
PASS_TO_PASS: [
"tests/integration/test_main.py::test_diff_option",
"tests/integration/test_main.py::test_form_feed_line_split",
"tests/integration/test_main.py::test_e101_indent_char_does_not_reset",
"tests/integration/test_main.py::test_statistics_option",
"tests/integration/test_main.py::test_show_source_option",
"tests/integration/test_main.py::test_extend_exclude",
"tests/integration/test_main.py::test_malformed_per_file_ignores_error",
"tests/integration/test_main.py::test_tokenization_error_but_not_syntax_error",
"tests/integration/test_main.py::test_tokenization_error_is_a_syntax_error",
"tests/integration/test_main.py::test_bug_report_successful",
"tests/integration/test_main.py::test_benchmark_successful",
"tests/integration/test_main.py::test_specific_noqa_does_not_clobber_pycodestyle_noqa",
"tests/integration/test_main.py::test_specific_noqa_on_line_with_continuation",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline_trailing_ws",
"tests/integration/test_main.py::test_obtaining_args_from_sys_argv_when_not_explicity_provided",
"tests/integration/test_main.py::test_cli_config_option_respected",
"tests/integration/test_main.py::test_cli_isolated_overrides_config_option",
"tests/integration/test_main.py::test_file_not_found",
"tests/integration/test_main.py::test_output_file",
"tests/integration/test_main.py::test_early_keyboard_interrupt_does_not_crash",
"tests/integration/test_main.py::test_config_file_not_found",
"tests/integration/test_plugins.py::test_enable_local_plugin_from_config",
"tests/integration/test_plugins.py::test_plugin_gets_enabled_by_default",
"tests/integration/test_plugins.py::test_plugin_off_by_default",
"tests/integration/test_plugins.py::test_physical_line_plugin_multiline_string",
"tests/unit/test_option_manager.py::test_parse_valid_jobs_argument[auto-True--1]",
"tests/unit/test_option_manager.py::test_parse_valid_jobs_argument[4-False-4]",
"tests/unit/test_option_manager.py::test_jobs_argument_str",
"tests/unit/test_option_manager.py::test_jobs_argument_repr",
"tests/unit/test_options_config.py::test_config_not_found_returns_none",
"tests/unit/test_options_config.py::test_config_file_without_section_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_parse_error_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_encoding_error_is_not_considered",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[setup.cfg]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[tox.ini]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[.flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8:local-plugins]",
"tests/unit/test_options_config.py::test_find_config_searches_upwards",
"tests/unit/test_options_config.py::test_find_config_ignores_homedir",
"tests/unit/test_options_config.py::test_find_config_ignores_unknown_homedir",
"tests/unit/test_options_config.py::test_load_config_config_specified_skips_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_file_does_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_found_sets_cfg_dir_to_pwd",
"tests/unit/test_options_config.py::test_load_config_isolated_ignores_configuration",
"tests/unit/test_options_config.py::test_load_config_append_config",
"tests/unit/test_options_config.py::test_load_auto_config_utf8",
"tests/unit/test_options_config.py::test_load_explicit_config_utf8",
"tests/unit/test_options_config.py::test_load_extra_config_utf8",
"tests/unit/test_options_config.py::test_load_config_missing_file_raises_exception"
]
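One subtlety in the flake8 patch above: the help string mixes f-string interpolation (for the formatter list) with argparse's own %-formatting, which is why literal percent signs are doubled (`%%`) while `%(default)s` is left for argparse to expand. A minimal standalone sketch of the same pattern, not flake8's actual option registration:

```python
import argparse

formatter_names = ["default", "pylint", "quiet-filename", "quiet-nothing"]

parser = argparse.ArgumentParser(prog="flake8")
parser.add_argument(
    "--format",
    metavar="format",
    default="default",
    help=(
        f"Format errors according to the chosen formatter "
        f"({', '.join(sorted(formatter_names))}) "
        f"or a format string containing %%-style mapping keys "
        f"(code, col, path, row, text). "
        f"(Default: %(default)s)"  # expanded by argparse, not the f-string
    ),
)
# argparse applies %-formatting when rendering help: '%%' collapses to '%'
# and '%(default)s' becomes 'default'.
print(parser.format_help())
```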

repo: arviz-devs/arviz
instance_id: arviz-devs__arviz-1076
base_commit: 0eef3b95eff477541ba599f15687612652074b7e

patch:
diff --git a/arviz/plots/backends/matplotlib/traceplot.py b/arviz/plots/backends/matplotlib/traceplot.py
--- a/arviz/plots/backends/matplotlib/traceplot.py
+++ b/arviz/plots/backends/matplotlib/traceplot.py
@@ -1,5 +1,6 @@
"""Matplotlib traceplot."""
+import warnings
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
@@ -48,8 +49,8 @@ def plot_trace(
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE. Only affects continuous
variables.
- lines : tuple
- Tuple of (var_name, {'coord': selection}, [line, positions]) to be overplotted as
+ lines : tuple or list
+ list of tuple of (var_name, {'coord': selection}, [line_positions]) to be overplotted as
vertical lines on the density and horizontal lines on the trace.
combined : bool
Flag for combining multiple chains into a single line. If False (default), chains will be
@@ -124,6 +125,21 @@ def plot_trace(
_, axes = plt.subplots(len(plotters), 2, squeeze=False, figsize=figsize, **backend_kwargs)
+ # Check the input for lines
+ if lines is not None:
+ all_var_names = set(plotter[0] for plotter in plotters)
+
+ invalid_var_names = set()
+ for line in lines:
+ if line[0] not in all_var_names:
+ invalid_var_names.add(line[0])
+ if invalid_var_names:
+ warnings.warn(
+ "A valid var_name should be provided, found {} expected from {}".format(
+ invalid_var_names, all_var_names
+ )
+ )
+
for idx, (var_name, selection, value) in enumerate(plotters):
value = np.atleast_2d(value)
@@ -219,6 +235,10 @@ def plot_trace(
line_values = [vlines]
else:
line_values = np.atleast_1d(vlines).ravel()
+ if not np.issubdtype(line_values.dtype, np.number):
+ raise ValueError(
+ "line-positions should be numeric, found {}".format(line_values)
+ )
axes[idx, 0].vlines(line_values, *ylims[0], colors="black", linewidth=1.5, alpha=0.75)
axes[idx, 1].hlines(
line_values, *xlims[1], colors="black", linewidth=1.5, alpha=trace_kwargs["alpha"]

test_patch:
diff --git a/arviz/tests/test_plots_matplotlib.py b/arviz/tests/test_plots_matplotlib.py
--- a/arviz/tests/test_plots_matplotlib.py
+++ b/arviz/tests/test_plots_matplotlib.py
@@ -156,6 +156,21 @@ def test_plot_trace_max_subplots_warning(models):
assert axes.shape
[email protected]("kwargs", [{"var_names": ["mu", "tau"], "lines": [("hey", {}, [1])]}])
+def test_plot_trace_invalid_varname_warning(models, kwargs):
+ with pytest.warns(UserWarning, match="valid var.+should be provided"):
+ axes = plot_trace(models.model_1, **kwargs)
+ assert axes.shape
+
+
[email protected](
+ "bad_kwargs", [{"var_names": ["mu", "tau"], "lines": [("mu", {}, ["hey"])]}]
+)
+def test_plot_trace_bad_lines_value(models, bad_kwargs):
+ with pytest.raises(ValueError, match="line-positions should be numeric"):
+ plot_trace(models.model_1, **bad_kwargs)
+
+
@pytest.mark.parametrize("model_fits", [["model_1"], ["model_1", "model_2"]])
@pytest.mark.parametrize(
"args_expected",
@@ -701,7 +716,6 @@ def test_plot_posterior_point_estimates(models, point_estimate):
"kwargs", [{"insample_dev": False}, {"plot_standard_error": False}, {"plot_ic_diff": False}]
)
def test_plot_compare(models, kwargs):
-
model_compare = compare({"Model 1": models.model_1, "Model 2": models.model_2})
axes = plot_compare(model_compare, **kwargs)

problem_statement:
plot_trace lines is unclear and it may yield unexpected results
**Describe the bug**
The argument `lines` for the function `plot_trace` can give unexpected results. Moreover, the documentation is a bit nebulous.
**To Reproduce**
A toy example is defined
```python
import pymc3 as pm
import arviz as az
import numpy as np
# fake data
mu_real = 0
sigma_real = 1
n_samples = 150
Y = np.random.normal(loc=mu_real, scale=sigma_real, size=n_samples)
with pm.Model() as model:
mu = pm.Normal('mu', mu=0, sigma=10)
sigma = pm.HalfNormal('sigma', sigma=10)
likelihood = pm.Normal('likelihood', mu=mu, sigma=sigma, observed=Y)
trace = pm.sample()
```
As per [documentation](https://arviz-devs.github.io/arviz/generated/arviz.plot_trace.html#arviz.plot_trace), the argument `lines` accepts a tuple in the form `(var_name, {'coord': selection}, [line, positions])`. So, the command
```python
az.plot_trace(trace, lines=(('mu', {}, mu_real),))
```
yields correctly

I can also pass a list of tuples or a list of tuples and lists and it will work fine:
```
az.plot_trace(trace, lines=[('mu', {}, mu_real)]) # list of tuples
az.plot_trace(trace, lines=[['mu', {}, mu_real]]) # list of lists
az.plot_trace(trace, lines=[['mu', {}, mu_real], ('sigma', {}, sigma_real)]) # list of lists and tuples
```
however, I cannot pass a simple tuple because I will get a `KeyError: 0`
```python
az.plot_trace(trace, lines=(['mu', {}, mu_real]))
az.plot_trace(trace, lines=(('mu', {}, mu_real)))
```
Also, I can pass a variable or coordinate name that does not exist and Arviz will not complain---but no lines will be plotted (here I would expect a warning)
```python
az.plot_trace(trace, lines=[('hey', {}, mu_real)])
az.plot_trace(trace, lines=[('mu', {'hey'}, mu_real)])
```

The weird behavior happens when I pass a string:
```python
az.plot_trace(trace, lines=[('mu', {}, 'hey')])
```

**Expected behavior**
The [documentation](https://arviz-devs.github.io/arviz/generated/arviz.plot_trace.html#arviz.plot_trace) could be improved and the function could check the inputs. In addition to what is described above, the placeholder `[line, positions]` in `(var_name, {'coord': selection}, [line, positions])` should be something like `[line_positions]`, otherwise one may think (like myself :) ) that two values should be inserted (one for `line` and one for `positions`).
**Additional context**
I am using Win10, fresh conda environment with PyMC3 and Arviz from master.
Possibly related https://github.com/pymc-devs/pymc3/issues/3495, https://github.com/pymc-devs/pymc3/issues/3497

hints_text:
Thanks for the feedback.
We are on our way to updating the interface a bit, so I hope we can fix this issue at the same time and make the usage intuitive.
Thank you for your good work!
@ahartikainen @OriolAbril
I think the error is caused by the following:
https://github.com/arviz-devs/arviz/blob/0774d13979317cf4d22bb995ee298de4804432d9/arviz/plots/backends/matplotlib/traceplot.py#L217-L225
I would check for the list, else raise the ValueError. Please verify.
Sounds good, one option for checking the list is to check the dtype of `line_values` after the `atleast_1d` call to make sure the array contains numeric values. One possibility is to follow this [SO answer](https://stackoverflow.com/questions/29518923/numpy-asarray-how-to-check-up-that-its-result-dtype-is-numeric).
Okay, I'd go with that. Also, I'd add a warning for the case of a variable or coordinate name that does not exist (as pointed out in the issue description):
> Also, I can pass a variable or coordinate name that does not exist and Arviz will not complain---but no lines will be plotted (here I would expect a warning)
> ```
> az.plot_trace(trace, lines=[('hey', {}, mu_real)])
> az.plot_trace(trace, lines=[('mu', {'hey'}, mu_real)])
> ```

created_at: 2020-02-17T22:14:31.000
version: -1.0
FAIL_TO_PASS: [
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_bad_lines_value[bad_kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_invalid_varname_warning[kwargs0]"
]
PASS_TO_PASS: [
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[None]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_discrete[True-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_short_chain",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_bad_coords[draw]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected3-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected2-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected6-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[rank]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy[hist]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[mean]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_no_sample_stats",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_khat_bad_input",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_coords[chain]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x1]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_no_sample_stats",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_no_divergences",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin_layout",
"arviz/tests/test_plots_matplotlib.py::test_plot_khat_annotate",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad_ax",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[median]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected0-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_coords[draw]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected4-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected5-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected3-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs11]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin_ax",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_discrete[False-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected0-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs9]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs10]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_bad_kwargs",
"arviz/tests/test_plots_matplotlib.py::test_plot_joint_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected4-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_elpd_one_model",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_no_divergences",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_grid",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_combined",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_incompatible_args",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs12]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_divergences_warning[True]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_bad[model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[normal]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_max_subplots_warning",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_bad[model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_rope_exception",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[minmax]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs9]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected5-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected6-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_bad_coords[chain]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected1-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_uncombined",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_divergences_warning[False]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_inference_data",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected1-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_raises_valueerror",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_evolution",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected2-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_single_value",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_kind",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[mode]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x0]"
]
pull_number: 1076
issue_numbers: "['929']"
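The numeric check added in the arviz patch above (the `np.issubdtype` approach discussed in the hints) can be exercised on its own; a small self-contained sketch:

```python
import numpy as np

def check_line_values(vlines):
    # Mirrors the validation added to plot_trace: flatten the input and
    # reject anything whose dtype is not a numpy numeric type.
    line_values = np.atleast_1d(vlines).ravel()
    if not np.issubdtype(line_values.dtype, np.number):
        raise ValueError(
            "line-positions should be numeric, found {}".format(line_values)
        )
    return line_values

print(check_line_values(0.0))   # [0.]
try:
    check_line_values("hey")    # a string array has dtype '<U3'
except ValueError as err:
    print(err)  # line-positions should be numeric, found ['hey']
```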

repo: haddocking/arctic3d
instance_id: haddocking__arctic3d-243
base_commit: 8cf368227787bb43f0936e36269925d908e31e72

patch:
diff --git a/src/arctic3d/cli.py b/src/arctic3d/cli.py
index 450572a..e2da9aa 100644
--- a/src/arctic3d/cli.py
+++ b/src/arctic3d/cli.py
@@ -111,6 +111,14 @@ argument_parser.add_argument(
default="average",
)
+argument_parser.add_argument(
+ "--numbering",
+ help="what to renumber while extracting the best pdb files",
+ type=str,
+ default="pdb",
+ choices=["pdb", "resi"],
+)
+
def load_args(arguments):
"""
@@ -166,6 +174,7 @@ def main(
ligand,
linkage_strategy,
threshold,
+ numbering,
log_level="DEBUG",
):
"""Main function."""
@@ -257,6 +266,7 @@ def main(
pdb_to_use=pdb_to_use,
chain_to_use=chain_to_use,
pdb_data=pdb_data_path,
+ numbering=numbering,
)
if pdb_f is None:
diff --git a/src/arctic3d/modules/pdb.py b/src/arctic3d/modules/pdb.py
index 513716f..83a0043 100644
--- a/src/arctic3d/modules/pdb.py
+++ b/src/arctic3d/modules/pdb.py
@@ -61,7 +61,7 @@ def get_cif_dict(cif_name):
return cif_dict
-def get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id):
+def get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id, key="pdb"):
"""
gets the numbering correspondence between the pdb file and the uniprot
sequence from the cif dict.
@@ -76,12 +76,15 @@ def get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id):
uniprot ID to be used (many IDs may exist in the .cif file)
chain_id : str
chain ID to be used
+ key : str
+ key to use for the numbering dict, either "uniprot" or "pdb"
Returns
-------
numbering_dict : dict
- pdb-resid : uniprot-resid dictionary
- Example : {"GLY-A-16" : 20, "TYR-A-17" : 21, ... }
+ pdb-resid : key-value dictionary
+ Example (key=pdb) : {"GLY-A-16" : 20, "TYR-A-17" : 21, ... }
+ Example (key=uniprot) : {20 : "GLY-A-16", 21 : "TYR-A-17", ... }
"""
atomsite_dict = cif_dict[pdb_id.upper()]["_atom_site"]
numbering_dict = {}
@@ -102,12 +105,75 @@ def get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id):
)
unp_num = atomsite_dict["pdbx_sifts_xref_db_num"][resid]
if residue_key != prev_residue_key: # not a duplicate entry
- numbering_dict[residue_key] = unp_num
+ if key == "pdb":
+ numbering_dict[residue_key] = unp_num
+ elif key == "uniprot":
+ numbering_dict[unp_num] = residue_key
+ else:
+ raise ValueError(f"key {key} not recognized")
prev_residue_key = residue_key
# log.debug(f"numbering dict {numbering_dict}")
return numbering_dict
+def renumber_interfaces_from_cif(
+ pdb_id, uniprot_id, chain_id, interface_residues
+):
+ """
+ Renumbers a list of interfaces based on the information coming from the
+ corresponding updated cif file.
+
+ Parameters
+ ----------
+ pdb_id : str
+ PDB ID
+ uniprot_id : str
+ uniprot ID to be used
+ chain_id : str
+ chain ID to be used
+    interface_residues : list
+        list of interface residues
+ """
+
+ cif_fname = Path(f"{pdb_id}_updated.cif")
+ if not cif_fname.is_file():
+ fetch_updated_cif(pdb_id, cif_fname)
+ cif_dict = get_cif_dict(cif_fname)
+
+ # retrieve mapping
+ numbering_dict = get_numbering_dict(
+ pdb_id, cif_dict, uniprot_id, chain_id, key="uniprot"
+ )
+ # log.debug(f"numbering_dict {numbering_dict}")
+ if any(numbering_dict):
+ unique_resids = set(
+ value for values in interface_residues.values() for value in values
+ )
+ renum_residues = {} # dictionary of renumbered residues
+ for residue in unique_resids:
+ str_res = str(residue)
+ if str_res in numbering_dict.keys():
+ # log.debug(f"Residue {residue} not found in cif file")
+ int_residue = int(numbering_dict[str_res].split("-")[2])
+ renum_residues[residue] = int_residue
+ else:
+ # log.debug(f"Residue {residue} not found in cif file")
+ renum_residues[residue] = None
+ # renumbering interfaces
+ renum_interfaces = {}
+ for interface, residues in interface_residues.items():
+ renum_residues_list = []
+ for residue in residues:
+ if residue is not None:
+ renum_residues_list.append(renum_residues[residue])
+ renum_interfaces[interface] = renum_residues_list
+ else:
+ log.info(f"Renumbering failed for pdb {pdb_id}-{chain_id}")
+ renum_interfaces = None
+ # log.debug(f"renum_interfaces {renum_interfaces}")
+ return renum_interfaces, cif_fname
+
+
def renumber_pdb_from_cif(pdb_id, uniprot_id, chain_id, pdb_fname):
"""
Renumbers a pdb file based on the information coming from the corresponding
@@ -135,7 +201,9 @@ def renumber_pdb_from_cif(pdb_id, uniprot_id, chain_id, pdb_fname):
cif_dict = get_cif_dict(cif_fname)
# retrieve mapping
- numbering_dict = get_numbering_dict(pdb_id, cif_dict, uniprot_id, chain_id)
+ numbering_dict = get_numbering_dict(
+ pdb_id, cif_dict, uniprot_id, chain_id, key="pdb"
+ )
# we do not check if all residues in pdb_fname have
# been correctly renumbered
@@ -368,7 +436,7 @@ def validate_api_hit(
fetch_list,
resolution_cutoff=4.0,
coverage_cutoff=0.0,
- max_pdb_num=10,
+ max_pdb_num=20,
):
"""
Validate PDB fetch request file.
@@ -422,7 +490,7 @@ def validate_api_hit(
log.info(f"Found {len(pdbs_to_fetch)} valid PDBs to fetch")
# downloading a list of good pdbs
validated_pdbs = fetch_pdb_files(pdbs_to_fetch[:max_pdb_num])
- log.info(f"Found {len(pdbs_to_fetch)} valid PDBs")
+ log.info(f"Fetched {len(validated_pdbs)} valid PDBs")
return validated_pdbs
@@ -473,7 +541,9 @@ def unlink_files(suffix="pdb", to_exclude=None):
fpath.unlink()
-def get_maxint_pdb(validated_pdbs, interface_residues, uniprot_id):
+def get_maxint_pdb(
+ validated_pdbs, interface_residues, uniprot_id, numbering="pdb"
+):
"""
Get PDB ID that retains the most interfaces.
@@ -485,15 +555,8 @@ def get_maxint_pdb(validated_pdbs, interface_residues, uniprot_id):
Dictionary of all the interfaces (each one with its uniprot ID as key)
uniprot_id : str
Uniprot ID
-
- Returns
- -------
- pdb_f : Path or None
- Path to PDB file.
- hit : dict or None
- Interface API hit.
- filtered_interfaces : dict or None
- Dictionary of the retained and filtered interfaces.
+ numbering : str
+ what to renumber? 'pdb' for pdb files, 'resi' for interface residues
"""
log.info("Selecting pdb retaining the most interfaces")
cif_f, pdb_f, hit, filtered_interfaces = None, None, None, None
@@ -502,31 +565,42 @@ def get_maxint_pdb(validated_pdbs, interface_residues, uniprot_id):
for curr_pdb, curr_hit in validated_pdbs:
chain_id = curr_hit["chain_id"]
pdb_id = curr_hit["pdb_id"]
+
# refactor renumbering
tidy_pdb_f = preprocess_pdb(curr_pdb, chain_id)
- curr_renum_pdb_f, curr_cif_f = renumber_pdb_from_cif(
- pdb_id, uniprot_id, chain_id, tidy_pdb_f
- )
- tidy_pdb_f.unlink()
- if curr_renum_pdb_f is None:
- continue
+ if numbering == "pdb": # renumber the pdb files
+ curr_pdb_f, curr_cif_f = renumber_pdb_from_cif(
+ pdb_id, uniprot_id, chain_id, tidy_pdb_f
+ )
+ curr_interface_residues = interface_residues
+ elif numbering == "resi": # renumber the interface residues
+ curr_pdb_f = tidy_pdb_f
+ (
+ curr_interface_residues,
+ curr_cif_f,
+ ) = renumber_interfaces_from_cif(
+ pdb_id, uniprot_id, chain_id, interface_residues
+ )
+ else:
+ raise ValueError(f"Unknown numbering option: {numbering}")
# load pdb file. If there is an error, skip to the next one
try:
- mdu = mda.Universe(curr_renum_pdb_f)
+ mdu = mda.Universe(curr_pdb_f)
except Exception as e:
- log.error(f"Error loading {curr_renum_pdb_f}: {e}")
+ log.error(f"Error loading {curr_pdb_f}: {e}")
continue
+
selection_string = f"name CA and chainID {chain_id}"
pdb_resids = mdu.select_atoms(selection_string).resids
tmp_filtered_interfaces = filter_interfaces(
- interface_residues, pdb_resids
+ curr_interface_residues, pdb_resids
)
curr_nint = len(tmp_filtered_interfaces)
if curr_nint > max_nint: # update "best" hit
max_nint = curr_nint
filtered_interfaces = tmp_filtered_interfaces.copy()
- pdb_f = curr_renum_pdb_f
+ pdb_f = curr_pdb_f
cif_f = curr_cif_f
hit = curr_hit
# unlink pdb files
@@ -536,7 +610,6 @@ def get_maxint_pdb(validated_pdbs, interface_residues, uniprot_id):
if max_nint != 0:
log.info(f"filtered_interfaces {filtered_interfaces}")
log.info(f"pdb {pdb_f} retains the most interfaces ({max_nint})")
-
return pdb_f, hit, filtered_interfaces
@@ -581,6 +654,7 @@ def get_best_pdb(
pdb_to_use=None,
chain_to_use=None,
pdb_data=None,
+ numbering="pdb",
):
"""
Get best PDB ID.
@@ -597,6 +671,8 @@ def get_best_pdb(
Chain id to be used.
pdb_data : Path or None
pdb json file for offline mode.
+ numbering : str (default pdb)
+ what to renumber, either the pdb files or the interface residues
Returns
-------
@@ -632,7 +708,7 @@ def get_best_pdb(
validated_pdbs = validate_api_hit(pdb_list)
pdb_f, top_hit, filtered_interfaces = get_maxint_pdb(
- validated_pdbs, interface_residues, uniprot_id
+ validated_pdbs, interface_residues, uniprot_id, numbering=numbering
)
if pdb_f is None:
| diff --git a/tests/test_cli.py b/tests/test_cli.py
index 996733c..988d26e 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -9,20 +9,21 @@ def test_cli_empty():
target_uniprot = "P23804"
start_cwd = os.getcwd()
main(
- target_uniprot,
- None,
- None,
- None,
- None,
- None,
- None,
- None,
- None,
- None,
- None,
- None,
- None,
- None,
+ input_arg=target_uniprot,
+ db=None,
+ interface_file=None,
+ out_partner=None,
+ out_pdb=None,
+ pdb_to_use=None,
+ chain_to_use=None,
+ run_dir=None,
+ interface_data=None,
+ pdb_data=None,
+ full=None,
+ ligand=None,
+ linkage_strategy=None,
+ threshold=None,
+ numbering=None,
)
os.chdir(start_cwd)
exp_dir = Path(f"arctic3d-{target_uniprot}")
diff --git a/tests/test_pdb.py b/tests/test_pdb.py
index 32d1f77..59c43a6 100644
--- a/tests/test_pdb.py
+++ b/tests/test_pdb.py
@@ -11,6 +11,7 @@ from arctic3d.modules.pdb import (
keep_atoms,
occ_pdb,
renumber_pdb_from_cif,
+ renumber_interfaces_from_cif,
selchain_pdb,
selmodel_pdb,
tidy_pdb,
@@ -197,7 +198,7 @@ def test_get_maxint_pdb_empty():
def test_get_maxint_pdb(good_hits, example_interfaces):
- """Test get_maxint_pdb."""
+ """Test get_maxint_pdb with implicit pdb numbering."""
validated_pdbs = validate_api_hit(good_hits)
pdb_f, top_hit, filtered_interfaces = get_maxint_pdb(
validated_pdbs, example_interfaces, "P00760"
@@ -208,6 +209,20 @@ def test_get_maxint_pdb(good_hits, example_interfaces):
assert filtered_interfaces == {"P01024": [103, 104, 105]}
+def test_get_maxint_pdb_resi(good_hits, example_interfaces):
+ """Test get_maxint_pdb with resi numbering."""
+ validated_pdbs = validate_api_hit(good_hits)
+ pdb_f, top_hit, filtered_interfaces = get_maxint_pdb(
+ validated_pdbs, example_interfaces, "P00760", numbering="resi"
+ )
+ # here the pdb is not renumbered
+ assert pdb_f.name == "4xoj-model1-atoms-A-occ-tidy.pdb"
+ assert top_hit["pdb_id"] == "4xoj"
+ assert top_hit["chain_id"] == "A"
+ # here the interfaces are renumbered, so the residues change
+ assert filtered_interfaces == {"P01024": [95, 96, 97]}
+
+
def test_filter_pdb_list(good_hits):
"""Test filter_pdb_list."""
observed_red_list = filter_pdb_list(good_hits, pdb_to_use="1abc")
@@ -263,3 +278,20 @@ def test_renumber_pdb_from_cif(inp_pdb_3psg):
assert lines[726][13:26] == "CA SER A 50"
pdb_renum_fname.unlink()
cif_fname.unlink()
+
+
+def test_renumber_interfaces_from_cif(inp_pdb_3psg):
+ """Test renumber_interfaces_from_cif."""
+ interfaces = {"P00441": [85, 137, 138]}
+ renum_interfaces, cif_fname = renumber_interfaces_from_cif(
+ pdb_id="3psg",
+ uniprot_id="P00791",
+ chain_id="A",
+ interface_residues=interfaces,
+ )
+ assert renum_interfaces == {"P00441": [26, 78, 79]}
+ # NB : this result is wrong in this case, as the pdb contains two different
+ # records with equal chain-resid, with two different insertion codes.
+ # It's not possible to extract the correct residues in this case, but
+ # this should be a highly unlikely case.
+ cif_fname.unlink()
| add option to renumber interfaces instead of pdb files
There are some (~20) proteins whose sequences contain more than 10K amino acids. In this case, renumbering the pdb file according to the canonical numbering will be wrong, since the result will not be parsable. To circumvent this problem, it is necessary to offer the option of renumbering the interface residues instead of the pdb files while looking for the best available pdb (the one maximizing the retained interfaces).
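For orientation, here is a minimal sketch of the residue-level renumbering described above, condensed from the `renumber_interfaces_from_cif` function added in the patch (the `numbering_dict` shape follows the `key="uniprot"` example in its docstring; names are illustrative):
```python
def renumber_interfaces(interface_residues, numbering_dict):
    """Map uniprot-numbered interface residues onto pdb numbering.

    numbering_dict example: {"20": "GLY-A-16", "21": "TYR-A-17"}
    interface_residues example: {"P01024": [20, 21]}
    """
    renumbered = {}
    for partner, residues in interface_residues.items():
        mapped = []
        for res in residues:
            entry = numbering_dict.get(str(res))
            if entry is not None:
                # the last field of e.g. "GLY-A-16" is the pdb residue id
                mapped.append(int(entry.split("-")[2]))
        renumbered[partner] = mapped
    return renumbered

# e.g. renumber_interfaces({"P01024": [20, 21]},
#                          {"20": "GLY-A-16", "21": "TYR-A-17"})
# -> {"P01024": [16, 17]}
```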
This renumbering is mostly safe, except for the case in which the pdb has multiple atomname-chainid-resid records (for example when dealing with PDBs with insertion codes, see #238). In that case this residue-based renumbering is going to fail miserably. | 2023-04-06 15:21:50+00:00 | -1.0 | [
"tests/test_cli.py::test_cli_empty",
"tests/test_pdb.py::test_selchain_pdb",
"tests/test_pdb.py::test_tidy_pdb",
"tests/test_pdb.py::test_occ_pdb",
"tests/test_pdb.py::test_keep_atoms",
"tests/test_pdb.py::test_selmodel_pdb",
"tests/test_pdb.py::test_validate_api_hit",
"tests/test_pdb.py::test_validate_api_hit_nmr",
"tests/test_pdb.py::test_get_maxint_pdb_empty",
"tests/test_pdb.py::test_get_maxint_pdb",
"tests/test_pdb.py::test_get_maxint_pdb_resi",
"tests/test_pdb.py::test_filter_pdb_list",
"tests/test_pdb.py::test_pdb_data",
"tests/test_pdb.py::test_get_numbering_dict",
"tests/test_pdb.py::test_renumber_pdb_from_cif",
"tests/test_pdb.py::test_renumber_interfaces_from_cif"
] | [] | |||
pints-team/pints | pints-team__pints-1499 | 4323e8b25a8874799cf4086e6d3cf1a1bcbf0bac | diff --git a/README.md b/README.md
index 9c8b3bf0..69b30e83 100644
--- a/README.md
+++ b/README.md
@@ -76,8 +76,11 @@ To see what's changed in the latest release, see the [CHANGELOG](https://github.
## Contributing to PINTS
-If you'd like to help us develop PINTS by adding new methods, writing documentation, or fixing embarassing bugs, please have a look at these [guidelines](https://github.com/pints-team/pints/blob/main/CONTRIBUTING.md) first.
+There are lots of ways to contribute to PINTS development, and anyone is free to join in!
+For example, you can report problems or make feature requests on the [issues](https://github.com/pints-team/pints/issues) pages.
+Similarly, if you want to contribute documentation or code you can tell us your idea on this page, and then provide a pull request for review.
+Because PINTS is a big project, we've written extensive [contribution guidelines](https://github.com/pints-team/pints/blob/master/CONTRIBUTING.md) to help standardise the code -- but don't worry, this will become clear during review.
## License
diff --git a/docs/source/log_priors.rst b/docs/source/log_priors.rst
index 03a5291c..65c1bb0c 100644
--- a/docs/source/log_priors.rst
+++ b/docs/source/log_priors.rst
@@ -23,6 +23,7 @@ Overview:
- :class:`HalfCauchyLogPrior`
- :class:`InverseGammaLogPrior`
- :class:`LogNormalLogPrior`
+- :class:`LogUniformLogPrior`
- :class:`MultivariateGaussianLogPrior`
- :class:`NormalLogPrior`
- :class:`StudentTLogPrior`
@@ -48,6 +49,8 @@ Overview:
.. autoclass:: LogNormalLogPrior
+.. autoclass:: LogUniformLogPrior
+
.. autoclass:: MultivariateGaussianLogPrior
.. autoclass:: NormalLogPrior
diff --git a/pints/__init__.py b/pints/__init__.py
index e6448e64..1c1591d5 100644
--- a/pints/__init__.py
+++ b/pints/__init__.py
@@ -100,6 +100,7 @@ from ._log_priors import (
HalfCauchyLogPrior,
InverseGammaLogPrior,
LogNormalLogPrior,
+ LogUniformLogPrior,
MultivariateGaussianLogPrior,
NormalLogPrior,
StudentTLogPrior,
diff --git a/pints/_log_priors.py b/pints/_log_priors.py
index eb669ab9..77edbac6 100644
--- a/pints/_log_priors.py
+++ b/pints/_log_priors.py
@@ -748,6 +748,74 @@ class LogNormalLogPrior(pints.LogPrior):
s=self._scale, size=(n, 1))
+class LogUniformLogPrior(pints.LogPrior):
+ r"""
+ Defines a log-uniform prior over a given range.
+
+ The range includes the lower and upper boundaries, so that any
+    point ``x`` with a non-zero prior must have ``0 < a <= x <= b``.
+
+ In 1D this has pdf
+
+ .. math::
+ f(x|a,b)=\begin{cases}0,&\text{if }x\not\in
+ [a,b]\\\frac{1}{x \log(\frac{b}{a})}
+ ,&\text{if }x\in[a,b]\end{cases}.
+
+ A random variable :math:`X` distributed according to this pdf has
+ expectation
+
+ .. math::
+ \mathrm{E}(X)=\frac{b-a}{\log(b/a)}.
+
+ For example, to create a prior with :math:`x\in[1e-2,1e2]`, use::
+
+ p = pints.LogUniformLogPrior(1e-2, 1e2)
+
+ Extends :class:`LogPrior`.
+ """
+ def __init__(self, a, b):
+ if a <= 0:
+ raise ValueError("a must be > 0")
+ if b <= a:
+ raise ValueError("b must be > a > 0")
+
+ self._a = a
+ self._b = b
+ #constant for S1 evaluation
+ self._c = np.divide(1, np.log(np.divide(b, a)))
+
+ def __call__(self, x):
+ return scipy.stats.loguniform.logpdf(x, self._a, self._b)
+
+ def cdf(self, x):
+ """ See :meth:`LogPrior.cdf()`. """
+ return scipy.stats.loguniform.cdf(x, self._a, self._b)
+
+ def icdf(self, p):
+ """ See :meth:`LogPrior.icdf()`. """
+ return scipy.stats.loguniform.ppf(p, self._a, self._b)
+
+ def evaluateS1(self, x):
+ """ See :meth:`LogPrior.evaluateS1()`. """
+ dp = np.array(- 1 / x)
+ # Set values outside limits to nan
+ dp[(np.asarray(x) < self._a) | (np.asarray(x) > self._b)] = np.nan
+ return self(x), dp
+
+ def mean(self):
+ """ See :meth:`LogPrior.mean()`. """
+ return scipy.stats.loguniform.mean(self._a, self._b)
+
+ def n_parameters(self):
+ """ See :meth:`LogPrior.n_parameters()`. """
+ return 1
+
+ def sample(self, n=1):
+ """ See :meth:`LogPrior.sample()`. """
+ return scipy.stats.loguniform.rvs(self._a, self._b, size=(n, 1))
+
+
class MultivariateGaussianLogPrior(pints.LogPrior):
r"""
Defines a multivariate Gaussian (log) prior with a given ``mean`` and
| diff --git a/pints/tests/test_log_priors.py b/pints/tests/test_log_priors.py
index 499ac341..23949fd2 100755
--- a/pints/tests/test_log_priors.py
+++ b/pints/tests/test_log_priors.py
@@ -595,6 +595,34 @@ class TestPrior(unittest.TestCase):
mean = np.mean(samples1).item()
self.assertTrue(9. < mean < 11.)
+ def test_log_uniform_prior(self):
+
+ #Test input parameters
+ self.assertRaises(ValueError, pints.LogUniformLogPrior, 0, 1)
+ self.assertRaises(ValueError, pints.LogUniformLogPrior, 1, 1)
+
+ a = 1e-2
+ b = 1e2
+
+ p = pints.LogUniformLogPrior(a, b)
+
+ #all values below were calculated separately (not by scipy)
+ self.assertAlmostEqual(p.mean(), 10.856276311376536)
+
+ #test n_parameters
+ self.assertEqual(p.n_parameters(), 1)
+
+ points = [0.1, 63.0]
+ vals = [0.08225828662619909, -6.36346153275938]
+ dvals = [-10.0, -0.015873015873015872]
+
+ for point, val, dval in zip(points, vals, dvals):
+ test_val_1, test_dval = p.evaluateS1(point)
+ test_val_2 = p(point)
+ self.assertEqual(test_val_1, test_val_2)
+ self.assertAlmostEqual(test_val_1, val)
+ self.assertAlmostEqual(test_dval, dval)
+
def test_log_normal_prior(self):
# Test input parameters
@@ -657,6 +685,21 @@ class TestPrior(unittest.TestCase):
self.assertAlmostEqual(pints_val, scipy_val)
self.assertAlmostEqual(pints_deriv[0], hand_calc_deriv)
+ def test_log_uniform_prior_cdf_icdf(self):
+ p1 = pints.LogUniformLogPrior(1e-2, 1e2)
+ self.assertAlmostEqual(p1.cdf(0.1), 0.25)
+ self.assertAlmostEqual(p1.cdf(10), 0.75)
+ self.assertAlmostEqual(p1.icdf(0.25), 0.1)
+ self.assertAlmostEqual(p1.icdf(0.75), 10.0)
+
+ def test_log_uniform_prior_sampling(self):
+ p1 = pints.LogUniformLogPrior(1e-2, 1e2)
+ samples = p1.sample(1000000)
+ mean = p1.mean()
+ sample_mean = np.mean(samples)
+ self.assertEqual(len(samples), 1000000)
+ self.assertLessEqual(np.abs(sample_mean - mean), 0.1)
+
def test_log_normal_prior_cdf_icdf(self):
p1 = pints.LogNormalLogPrior(-3.5, 7.7)
self.assertAlmostEqual(p1.cdf(1.1), 0.6797226585187124)
| Add reciprocal distribution prior (LogUniformLogPrior) to help estimate parameters that vary over multiple orders of magnitude
Hi, thanks for a great package! It would be great to have a [reciprocal/log uniform](https://en.wikipedia.org/wiki/Reciprocal_distribution) prior. It should be straightforward since it's implemented in `scipy.stats`, and I would be happy to implement it. | 2023-10-18 19:51:46+00:00 | -1.0 | [
"pints/tests/test_log_priors.py::TestPrior::test_log_uniform_prior",
"pints/tests/test_log_priors.py::TestPrior::test_log_uniform_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_log_uniform_prior_sampling"
] | [
"pints/tests/test_log_priors.py::TestPrior::test_beta_prior",
"pints/tests/test_log_priors.py::TestPrior::test_beta_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_cauchy_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_cauchy_prior",
"pints/tests/test_log_priors.py::TestPrior::test_cauchy_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_composed_prior",
"pints/tests/test_log_priors.py::TestPrior::test_composed_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_composed_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_exponential_prior",
"pints/tests/test_log_priors.py::TestPrior::test_exponential_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_exponential_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_gamma_prior",
"pints/tests/test_log_priors.py::TestPrior::test_gamma_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_gamma_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_gaussian_prior",
"pints/tests/test_log_priors.py::TestPrior::test_gaussian_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_gaussian_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_half_cauchy_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_half_cauchy_prior",
"pints/tests/test_log_priors.py::TestPrior::test_half_cauchy_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_inverse_gamma_prior",
"pints/tests/test_log_priors.py::TestPrior::test_inverse_gamma_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_inverse_gamma_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_log_normal_prior",
"pints/tests/test_log_priors.py::TestPrior::test_log_normal_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_log_normal_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_multivariate_normal_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_multivariate_normal_prior",
"pints/tests/test_log_priors.py::TestPrior::test_multivariate_normal_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_student_t_prior",
"pints/tests/test_log_priors.py::TestPrior::test_student_t_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_student_t_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_truncated_gaussian_prior",
"pints/tests/test_log_priors.py::TestPrior::test_truncated_gaussian_prior_cdf_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_truncated_gaussian_prior_sampling",
"pints/tests/test_log_priors.py::TestPrior::test_uniform_prior",
"pints/tests/test_log_priors.py::TestPrior::test_uniform_prior_cdf",
"pints/tests/test_log_priors.py::TestPrior::test_uniform_prior_icdf",
"pints/tests/test_log_priors.py::TestPrior::test_uniform_prior_sampling"
] | |||
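As an editorial aside on the `LogUniformLogPrior` record above: since the pdf is `1/(x log(b/a))` on `[a, b]`, the mean integrates to `(b - a)/log(b/a)`, which is exactly the hard-coded `10.856276311376536` in the test for `a=1e-2, b=1e2`. A standalone check against `scipy.stats.loguniform` (not part of the patch):
```python
import numpy as np
from scipy import stats

a, b = 1e-2, 1e2
analytic_mean = (b - a) / np.log(b / a)  # ~10.856276311376536

# scipy agrees with the closed form ...
assert np.isclose(stats.loguniform.mean(a, b), analytic_mean)

# ... and sample means converge to it, mirroring the sampling test
samples = stats.loguniform.rvs(a, b, size=1_000_000, random_state=0)
assert abs(samples.mean() - analytic_mean) < 0.1
```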
beetbox/confuse | beetbox__confuse-97 | 290a46f27eb081058ddb80afa5383b9253511f6d | diff --git a/confuse/core.py b/confuse/core.py
index 193bfc3..5015705 100644
--- a/confuse/core.py
+++ b/confuse/core.py
@@ -335,7 +335,7 @@ class ConfigView(object):
od[key] = view.get()
return od
- def get(self, template=None):
+ def get(self, template=templates.REQUIRED):
"""Retrieve the value for this view according to the template.
The `template` against which the values are checked can be
diff --git a/confuse/templates.py b/confuse/templates.py
index 640ba66..984c341 100644
--- a/confuse/templates.py
+++ b/confuse/templates.py
@@ -626,6 +626,8 @@ def as_template(value):
elif isinstance(value, float):
return Number(value)
elif value is None:
+ return Template(None)
+ elif value is REQUIRED:
return Template()
elif value is dict:
return TypeTemplate(abc.Mapping)
diff --git a/docs/index.rst b/docs/index.rst
index 64f9d69..b8ac11b 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -393,6 +393,10 @@ v1.3.0
- Break up the `confuse` module into a package. (All names should still be
importable from `confuse`.)
+- When using `None` as a template, the result is a value whose default is
+ `None`. Previously, this was equivalent to leaving the key off entirely,
+ i.e., a template with no default. To get the same effect now, use
+ `confuse.REQUIRED` in the template.
v1.2.0
''''''
| diff --git a/test/test_valid.py b/test/test_valid.py
index ec7cf6b..4ba8125 100644
--- a/test/test_valid.py
+++ b/test/test_valid.py
@@ -171,6 +171,11 @@ class AsTemplateTest(unittest.TestCase):
def test_none_as_template(self):
typ = confuse.as_template(None)
self.assertIs(type(typ), confuse.Template)
+ self.assertEqual(typ.default, None)
+
+ def test_required_as_template(self):
+ typ = confuse.as_template(confuse.REQUIRED)
+ self.assertIs(type(typ), confuse.Template)
self.assertEqual(typ.default, confuse.REQUIRED)
def test_dict_type_as_template(self):
| None as Template should use None as default
When I do this for a key that doesn't exist in `config_default.yaml`, I would expect the key to contain `None` by default.
```python
config['something'].get({
'key': None
})
```
But instead, I get:
```
confuse.NotFoundError: something.key not found
```
This is because of this line in `as_template`:
https://github.com/beetbox/confuse/blob/b82d3faacb972b8964487a648805c3d0e06c0212/confuse.py#L1718-L1719
What I would propose:
```python
# throws NotFoundError - '...' is often used to denote a missing value
config['something'].get({
'key': ...
})
# returns None
config['something'].get({
'key': None
})
``` | 2020-06-09 21:59:16+00:00 | -1.0 | [
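As merged, the patch above adopts this proposal but uses `confuse.REQUIRED` (exercised in the test patch) as the explicit "no default" marker instead of `...`. A short sketch of the resulting API; the `Configuration` setup here is illustrative:
```python
import confuse

config = confuse.Configuration('myapp', read=False)  # 'myapp' is a placeholder
config.set({'something': {}})

# None in a template now means "default to None when the key is missing"
assert config['something'].get({'key': None})['key'] is None

# confuse.REQUIRED keeps the old "must be present" semantics
config['something'].get({'key': confuse.REQUIRED})  # raises confuse.NotFoundError
```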
"test/test_valid.py::AsTemplateTest::test_none_as_template",
"test/test_valid.py::AsTemplateTest::test_required_as_template"
] | [
"test/test_valid.py::ValidConfigTest::test_attribute_access",
"test/test_valid.py::ValidConfigTest::test_default_value",
"test/test_valid.py::ValidConfigTest::test_int_default_shortcut",
"test/test_valid.py::ValidConfigTest::test_int_template_shortcut",
"test/test_valid.py::ValidConfigTest::test_missing_required_value_raises_error_on_validate",
"test/test_valid.py::ValidConfigTest::test_nested_attribute_access",
"test/test_valid.py::ValidConfigTest::test_nested_dict_template",
"test/test_valid.py::ValidConfigTest::test_none_as_default",
"test/test_valid.py::ValidConfigTest::test_undeclared_key_ignored_from_input",
"test/test_valid.py::ValidConfigTest::test_undeclared_key_raises_keyerror",
"test/test_valid.py::ValidConfigTest::test_validate_individual_value",
"test/test_valid.py::ValidConfigTest::test_validate_simple_dict",
"test/test_valid.py::ValidConfigTest::test_wrong_type_raises_error_on_validate",
"test/test_valid.py::AsTemplateTest::test_concrete_float_as_template",
"test/test_valid.py::AsTemplateTest::test_concrete_int_as_template",
"test/test_valid.py::AsTemplateTest::test_concrete_string_as_template",
"test/test_valid.py::AsTemplateTest::test_dict_as_template",
"test/test_valid.py::AsTemplateTest::test_dict_type_as_template",
"test/test_valid.py::AsTemplateTest::test_enum_type_as_template",
"test/test_valid.py::AsTemplateTest::test_float_type_as_tempalte",
"test/test_valid.py::AsTemplateTest::test_list_as_template",
"test/test_valid.py::AsTemplateTest::test_list_type_as_template",
"test/test_valid.py::AsTemplateTest::test_nested_dict_as_template",
"test/test_valid.py::AsTemplateTest::test_other_type_as_template",
"test/test_valid.py::AsTemplateTest::test_plain_int_as_template",
"test/test_valid.py::AsTemplateTest::test_plain_string_as_template",
"test/test_valid.py::AsTemplateTest::test_set_as_template",
"test/test_valid.py::AsTemplateTest::test_set_type_as_template",
"test/test_valid.py::StringTemplateTest::test_check_string_type",
"test/test_valid.py::StringTemplateTest::test_pattern_matching",
"test/test_valid.py::StringTemplateTest::test_string_default_shortcut",
"test/test_valid.py::StringTemplateTest::test_string_default_value",
"test/test_valid.py::StringTemplateTest::test_string_template_shortcut",
"test/test_valid.py::StringTemplateTest::test_validate_string",
"test/test_valid.py::NumberTest::test_validate_float_as_number",
"test/test_valid.py::NumberTest::test_validate_int_as_number",
"test/test_valid.py::NumberTest::test_validate_string_as_number",
"test/test_valid.py::ChoiceTest::test_validate_bad_choice_in_dict",
"test/test_valid.py::ChoiceTest::test_validate_bad_choice_in_list",
"test/test_valid.py::ChoiceTest::test_validate_good_choice_in_dict",
"test/test_valid.py::ChoiceTest::test_validate_good_choice_in_list",
"test/test_valid.py::OneOfTest::test_default_value",
"test/test_valid.py::OneOfTest::test_validate_bad_template",
"test/test_valid.py::OneOfTest::test_validate_first_good_choice_in_list",
"test/test_valid.py::OneOfTest::test_validate_good_choice_in_list",
"test/test_valid.py::OneOfTest::test_validate_no_choice_in_list",
"test/test_valid.py::StrSeqTest::test_invalid_sequence_type",
"test/test_valid.py::StrSeqTest::test_invalid_type",
"test/test_valid.py::StrSeqTest::test_string_list",
"test/test_valid.py::StrSeqTest::test_string_tuple",
"test/test_valid.py::StrSeqTest::test_whitespace_separated_string",
"test/test_valid.py::FilenameTest::test_default_none",
"test/test_valid.py::FilenameTest::test_default_value",
"test/test_valid.py::FilenameTest::test_filename_relative_to_self",
"test/test_valid.py::FilenameTest::test_filename_relative_to_sibling",
"test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_needs_siblings",
"test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_needs_template",
"test/test_valid.py::FilenameTest::test_filename_relative_to_sibling_with_recursion",
"test/test_valid.py::FilenameTest::test_filename_relative_to_working_dir",
"test/test_valid.py::FilenameTest::test_filename_with_default_source",
"test/test_valid.py::FilenameTest::test_filename_with_file_source",
"test/test_valid.py::FilenameTest::test_filename_with_non_file_source",
"test/test_valid.py::FilenameTest::test_filename_working_dir_overrides_sibling",
"test/test_valid.py::FilenameTest::test_filename_wrong_type",
"test/test_valid.py::FilenameTest::test_missing_required_value",
"test/test_valid.py::PathTest::test_default_none",
"test/test_valid.py::PathTest::test_default_value",
"test/test_valid.py::PathTest::test_missing_required_value",
"test/test_valid.py::PathTest::test_path_value",
"test/test_valid.py::BaseTemplateTest::test_base_template_accepts_any_value",
"test/test_valid.py::BaseTemplateTest::test_base_template_required",
"test/test_valid.py::BaseTemplateTest::test_base_template_with_default",
"test/test_valid.py::TypeTemplateTest::test_correct_type",
"test/test_valid.py::TypeTemplateTest::test_default_value",
"test/test_valid.py::TypeTemplateTest::test_incorrect_type",
"test/test_valid.py::TypeTemplateTest::test_missing_required_value",
"test/test_valid.py::SequenceTest::test_dict_list",
"test/test_valid.py::SequenceTest::test_int_list",
"test/test_valid.py::SequenceTest::test_invalid_item"
] | |||
North14/avanza | North14__avanza-38 | 0f80162cb0d8ff0e0f039a2cb52d43662eafb266 | diff --git a/avanza/__init__.py b/avanza/__init__.py
index b8e9956..4528053 100644
--- a/avanza/__init__.py
+++ b/avanza/__init__.py
@@ -2,6 +2,6 @@
from .ticker import Ticker
from .search import Search
from .news import News
-from . import collection
+from .collection import Collection
from ._version import __version__
from .base import Config, Base
diff --git a/avanza/chartdata.py b/avanza/chartdata.py
index 9c2c9d2..c8d1c29 100644
--- a/avanza/chartdata.py
+++ b/avanza/chartdata.py
@@ -9,9 +9,11 @@
from .base import Base
-class ChartData(Base):
+class ChartData:
"""Grab json chartdata and output as pandas DataFrame"""
- def get_overview_chartdata(self, time_period='one_month'):
+
+ @staticmethod
+ def get_overview_chartdata(time_period='one_month'):
"""Returns chartdata from overview page
Args:
@@ -26,8 +28,8 @@ def get_overview_chartdata(self, time_period='one_month'):
time_period = time_period.upper()
path = f"{BASE_URL}{constants['paths']['CHARTDATA_OVERVIEW']}"
url = path.format(time_period)
- if self._check_time_period(time_period):
- r = self._request(url, auth=True)
+ if Base()._check_time_period(time_period):
+ r = Base()._request(url, auth=True)
if pandas_imported:
if 'absoluteSeries' in r:
data_series = []
@@ -41,7 +43,8 @@ def get_overview_chartdata(self, time_period='one_month'):
else:
raise Exception("Invalid time_period!")
- def get_distribution_chartdata(self):
+ @staticmethod
+ def get_distribution_chartdata():
"""Returns values from account distribution pie chart
Returns:
@@ -52,7 +55,7 @@ def get_distribution_chartdata(self):
Will "unpack" original drilldown
"""
url = f"{BASE_URL}{constants['paths']['CHARTDATA_DISTRIBUTION']}"
- r = self._request(url, auth=True)
+ r = Base()._request(url, auth=True)
if pandas_imported:
pie_dict_list = []
for x in r:
@@ -65,7 +68,8 @@ def get_distribution_chartdata(self):
return pandas.read_json(json.dumps(pie_dict_list))
return r
- def get_ticker_chartdata(self, orderbook_id, **kwargs):
+ @staticmethod
+ def get_ticker_chartdata(orderbook_id, **kwargs):
"""Returns chartdata from overview page
Args:
@@ -101,7 +105,7 @@ def get_ticker_chartdata(self, orderbook_id, **kwargs):
h = {"Content-Type": "application/json"}
from datetime import datetime
- r = self._request(url, params=p, headers=h, method="POST")
+ r = Base()._request(url, params=p, headers=h, method="POST")
if pandas_imported:
if 'dataPoints' in r:
data_series = r['dataPoints']
diff --git a/avanza/collection.py b/avanza/collection.py
index 5cd2aaf..100c851 100644
--- a/avanza/collection.py
+++ b/avanza/collection.py
@@ -2,147 +2,149 @@
from .base import Base
-def get_account_overview(account_id):
- """Returns information about accounts watchlists
-
- Args:
- account_id (int): id of account
-
- Returns:
- dict:
-
- Note:
- Authentication neccessary
- """
- path = f"{BASE_URL}{constants['paths']['ACCOUNT_OVERVIEW_PATH']}"
- url = path.format(account_id)
- return Base()._request(url, auth=True)
-
-
-def get_transactions(account_id=None):
- """
- Returns information about accounts watchlists
-
-
- Args:
- account_id (int): id of account
-
- Returns:
- dict:
-
- Note:
- Authentication neccessary
- """
- url = f"{BASE_URL}{constants['paths']['TRANSACTIONS_PATH']}"
- if account_id:
- return Base()._request(url.format(account_id), auth=True)
- return Base()._request(url.replace('{0:d}', ''), auth=True)
-
-
-def get_insight(**kwargs):
- """
- Returns accounts
-
- Args:
- time_period (str): time period
-
- Returns:
- dict:
-
- Note:
- Authentication neccessary
- """
- time_period = kwargs.pop('time_period', 'TODAY').upper()
- assert not kwargs
- url = f"{BASE_URL}{constants['paths']['INSIGHT']}".format(time_period)
- if Base()._check_time_period(time_period):
+class Collection:
+ @staticmethod
+ def get_account_overview(account_id):
+ """Returns information about accounts watchlists
+
+ Args:
+ account_id (int): id of account
+
+ Returns:
+ dict:
+
+ Note:
+ Authentication neccessary
+ """
+ path = f"{BASE_URL}{constants['paths']['ACCOUNT_OVERVIEW_PATH']}"
+ url = path.format(account_id)
return Base()._request(url, auth=True)
- else:
- raise Exception("Invalid time period!")
-
-
-def get_watchlists():
- """Returns information about accounts watchlists
-
- Returns:
- dict:
-
- Note:
- Authentication neccessary
- """
- url = f"{BASE_URL}{constants['paths']['WATCHLISTS_PATH']}"
- return Base()._request(url, auth=True)
-
-
-def get_positions():
- """Returns information about accounts positions
-
- Returns:
- dict:
-
- Note:
- Authentication neccessary
- """
- url = f"{BASE_URL}{constants['paths']['POSITIONS_PATH']}"
- return Base()._request(url, auth=True)
-
-
-def get_deals_and_orders():
- """Returns deals, orders and accounts
-
- Returns:
- dict:
-
- Note:
- Authentication neccessary
- """
- url = f"{BASE_URL}{constants['paths']['DEALS_AND_ORDERS_PATH']}"
- return Base()._request(url, auth=True)
+ @staticmethod
+ def get_transactions(account_id=None):
+ """
+ Returns information about accounts watchlists
+
+
+ Args:
+ account_id (int): id of account
+
+ Returns:
+ dict:
+
+ Note:
+ Authentication neccessary
+ """
+ url = f"{BASE_URL}{constants['paths']['TRANSACTIONS_PATH']}"
+ if account_id:
+ return Base()._request(url.format(account_id), auth=True)
+ return Base()._request(url.replace('{0:d}', ''), auth=True)
+
+ @staticmethod
+ def get_insight(**kwargs):
+ """
+ Returns accounts
+
+ Args:
+ time_period (str): time period
+
+ Returns:
+ dict:
+
+ Note:
+ Authentication neccessary
+ """
+ time_period = kwargs.pop('time_period', 'TODAY').upper()
+ assert not kwargs
+ url = f"{BASE_URL}{constants['paths']['INSIGHT']}".format(time_period)
+ if Base()._check_time_period(time_period):
+ return Base()._request(url, auth=True)
+ else:
+ raise Exception("Invalid time period!")
+
+ @staticmethod
+ def get_watchlists():
+ """Returns information about accounts watchlists
+
+ Returns:
+ dict:
+
+ Note:
+ Authentication neccessary
+ """
+ url = f"{BASE_URL}{constants['paths']['WATCHLISTS_PATH']}"
+ return Base()._request(url, auth=True)
-def get_feed():
- """Returns feed from Home
+ @staticmethod
+ def get_positions():
+ """Returns information about accounts positions
- Returns:
- dict:
+ Returns:
+ dict:
- Note:
- Authentication neccessary
- """
- url = f"{BASE_URL}{constants['paths']['FEED']}"
- return Base()._request(url, auth=True)
+ Note:
+ Authentication neccessary
+ """
+ url = f"{BASE_URL}{constants['paths']['POSITIONS_PATH']}"
+ return Base()._request(url, auth=True)
+ @staticmethod
+ def get_deals_and_orders():
+ """Returns deals, orders and accounts
-def get_accounts():
- """Returns accounts
+ Returns:
+ dict:
- Returns:
- dict:
+ Note:
+ Authentication neccessary
+ """
+ url = f"{BASE_URL}{constants['paths']['DEALS_AND_ORDERS_PATH']}"
+ return Base()._request(url, auth=True)
- Note:
- Authentication neccessary
- """
- url = f"{BASE_URL}{constants['paths']['ACCOUNTS']}"
- return Base()._request(url, auth=True)
+ @staticmethod
+ def get_feed():
+ """Returns feed from Home
+ Returns:
+ dict:
-def get_inspiration_list():
- """Returns inspiration list
+ Note:
+ Authentication neccessary
+ """
+ url = f"{BASE_URL}{constants['paths']['FEED']}"
+ return Base()._request(url, auth=True)
- Returns:
- dict:
- """
- url = f"{BASE_URL}{constants['paths']['INSPIRATION_LIST_PATH']}"
- return Base()._request(url)
+ @staticmethod
+ def get_accounts():
+ """Returns accounts
+ Returns:
+ dict:
-def get_account_summary():
- """Returns account summary
+ Note:
+ Authentication neccessary
+ """
+ url = f"{BASE_URL}{constants['paths']['ACCOUNTS']}"
+ return Base()._request(url, auth=True)
- Returns:
- dict:
- Note:
- Authentication neccessary
- """
- url = f"{BASE_URL}{constants['paths']['CATEGORIZED_ACCOUNTS']}"
- return Base()._request(url, auth=True)
+ @staticmethod
+ def get_inspiration_list():
+ """Returns inspiration list
+
+ Returns:
+ dict:
+ """
+ url = f"{BASE_URL}{constants['paths']['INSPIRATION_LIST_PATH']}"
+ return Base()._request(url)
+
+ @staticmethod
+ def get_account_summary():
+ """Returns account summary
+
+ Returns:
+ dict:
+ Note:
+ Authentication neccessary
+ """
+ url = f"{BASE_URL}{constants['paths']['CATEGORIZED_ACCOUNTS']}"
+ return Base()._request(url, auth=True)
diff --git a/tox.ini b/tox.ini
index aa2dd05..e1b0c4d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -35,6 +35,18 @@ commands =
TRAVIS_BRANCH TRAVIS_BUILD_WEB_URL \
TRAVIS_COMMIT TRAVIS_COMMIT_MESSAGE
+[testenv:testonly]
+description = run pytest
+basepython = python
+deps =
+ requests
+ pytest
+ coverage
+setenv =
+ {[default]setenv}
+commands =
+ coverage run --branch --source {env:PY_MODULE} -m pytest testing/noauth
+
[testenv:flake8]
description = lint with flake8
basepython = {[default]basepython}
| diff --git a/testing/noauth/test_inspiration_list.py b/testing/noauth/test_inspiration_list.py
index 2b9e7ea..aa9376d 100644
--- a/testing/noauth/test_inspiration_list.py
+++ b/testing/noauth/test_inspiration_list.py
@@ -2,6 +2,6 @@
def test_inspiration_list():
- inspiration = avanza.collection.get_inspiration_list()
+ inspiration = avanza.Collection.get_inspiration_list()
q = type(inspiration)
assert q is dict or q is list
diff --git a/testing/noauth/test_ticker_chartdata.py b/testing/noauth/test_ticker_chartdata.py
index b7ab2ab..6aa2b94 100644
--- a/testing/noauth/test_ticker_chartdata.py
+++ b/testing/noauth/test_ticker_chartdata.py
@@ -2,7 +2,7 @@
def test_ticker_chartdata():
- chartdata = avanza.ChartData().get_ticker_chartdata(3873)
+ chartdata = avanza.ChartData.get_ticker_chartdata(3873)
assert not chartdata.empty
- chartdata = avanza.ChartData().get_ticker_chartdata(3873, chart_type='OHLC')
+ chartdata = avanza.ChartData.get_ticker_chartdata(3873, chart_type='OHLC')
assert not chartdata.empty
| Use classmethods where applicable
| 2020-03-25T15:57:47.000 | -1.0 | [
"testing/noauth/test_inspiration_list.py::test_inspiration_list"
] | [] | 38 | "['36']" |
|
holoviz/panel | holoviz__panel-5774 | c03d7822fd14a7361f120cd6b146aad5429a8fb7 | diff --git a/panel/io/location.py b/panel/io/location.py
--- a/panel/io/location.py
+++ b/panel/io/location.py
@@ -38,7 +38,7 @@ class Location(Syncable):
hostname = param.String(readonly=True, doc="""
hostname in window.location e.g. 'panel.holoviz.org'""")
- pathname = param.String(regex=r"^$|[\/].*$", doc="""
+ pathname = param.String(regex=r"^$|[\/]|srcdoc$", doc="""
pathname in window.location e.g. '/user_guide/Interact.html'""")
protocol = param.String(readonly=True, doc="""
| diff --git a/panel/tests/io/test_location.py b/panel/tests/io/test_location.py
--- a/panel/tests/io/test_location.py
+++ b/panel/tests/io/test_location.py
@@ -161,3 +161,6 @@ def app():
assert loc.hostname == 'localhost'
assert loc.pathname == '/'
assert loc.search == '?foo=1'
+
+def test_iframe_srcdoc_location():
+ Location(pathname="srcdoc")
| ValueError when embedding Panel Server App in Iframe
#### Description of expected behavior and the observed behavior
The [Location regex `"^$|[\/].*$"`](https://github.com/holoviz/panel/blob/ff43d36e14ff84238e0450bd56c7541ca5a0147a/panel/io/location.py#L41) fails when injecting a bokeh app in an iframe using `iframe.srcdoc`.
I'm getting the following error in my Python logs:
```python-traceback
ValueError: String parameter 'pathname' value 'srcdoc' does not match regex '^$|[\\/].*$'.
```
However, the panel app seems to be rendered ok.
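Since param validates `String` values against the regex with `re.match` (the `_validate_regex` frames in the tracebacks below suggest as much), the one-line regex change in the patch can be exercised in isolation. A standalone illustration, not panel code:
```python
import re

old = r"^$|[\/].*$"       # pre-patch pattern
new = r"^$|[\/]|srcdoc$"  # patched pattern

for value in ("", "/user_guide/Interact.html", "srcdoc"):
    print(repr(value), bool(re.match(old, value)), bool(re.match(new, value)))
# ''                          -> True  True
# '/user_guide/Interact.html' -> True  True
# 'srcdoc'                    -> False True  (the iframe case that raised)
```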
#### Complete, minimal, self-contained example code that reproduces the issue
- Webpage with iframe to hold panel/bokeh app containing the following:
```html
<!DOCTYPE html>
<html>
<head>
<title>Test iframe</title>
</head>
<body>
<p>This is a web app.</p>
<iframe id="myIframe" frameborder="10" marginwidth="10" marginheight="0" scrolling="NO" width="100%" height="500px" sandbox="allow-scripts"></iframe>
<script>
async function getSrc(iframe, url) {
const resp = await fetch(url, {
method: 'GET',
headers: {
'Accept': 'application/json',
'ContentType': 'application/json',
}
});
console.log("resp", resp)
const app_html = await resp.text();
console.log("app_html", app_html)
iframe.srcdoc = app_html
}
const load = () => {
var iframe = document.getElementById("myIframe");
var url = "http://localhost:8000/bokeh-app";
getSrc(iframe, url)
}
load()
</script>
<p>This is the footer of the web app.</p>
</body>
</html>
```
- [FastAPI Server wrapping around Panel Server](https://panel.holoviz.org/how_to/integrations/FastAPI.html) in backend:
```python
import panel as pn
from bokeh.embed import server_document
from fastapi import FastAPI, Request
from fastapi.templating import Jinja2Templates
app = FastAPI()
templates = Jinja2Templates(directory="templates")
@app.get("/bokeh-app")
async def bkapp_page(request: Request):
script = server_document(
"http://127.0.0.1:5000/app",
)
return templates.TemplateResponse(
"base.html", {"request": request, "script": script}
)
def createApp():
return pn.pane.Markdown("Hello World!").servable()
pn.serve(
{"/app": createApp},
port=5000,
allow_websocket_origin=["*"],
address="127.0.0.1",
show=False,
)
```
- `base.html` template:
```
<!DOCTYPE html>
<html>
<head>
<title>Panel in FastAPI</title>
</head>
<body>
{{ script|safe }}
</body>
</html>
```
- Run with `uvicorn --port 8000 main_fastapi:app --host localhost`
#### Stack traceback and/or browser JavaScript console output
```python-traceback
2023-09-13 12:32:18,378 ERROR: panel.reactive - Callback failed for object named "Location00115" changing properties {'href': 'about:srcdoc', 'pathname': 'srcdoc', 'protocol': 'about:'}
Traceback (most recent call last):
File "/python_env/lib/python3.11/site-packages/panel/reactive.py", line 384, in _process_events
self.param.update(**self_events)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 1895, in update
setattr(self_or_cls, k, v)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 367, in _f
instance_param.__set__(obj, val)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 369, in _f
return f(self, obj, val)
^^^^^^^^^^^^^^^^^
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 1201, in __set__
self._validate(val)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 1350, in _validate
self._validate_regex(val, self.regex)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 1338, in _validate_regex
raise ValueError("String parameter %r value %r does not match regex %r."
ValueError: String parameter 'pathname' value 'srcdoc' does not match regex '^$|[\\/].*$'.
Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <tornado.platform.asyncio.AsyncIOMainLoop object at 0x7fa4a2344990>>, <Task finished name='Task-87' coro=<ServerSession.with_document_locked() done, defined at /python_env/lib/python3.11/site-packages/bokeh/server/session.py:77> exception=ValueError("String parameter 'pathname' value 'srcdoc' does not match regex '^$|[\\\\/].*$'.")>)
Traceback (most recent call last):
File "/python_env/lib/python3.11/site-packages/tornado/ioloop.py", line 738, in _run_callback
ret = callback()
^^^^^^^^^^
File "/python_env/lib/python3.11/site-packages/tornado/ioloop.py", line 762, in _discard_future_result
future.result()
File "/python_env/lib/python3.11/site-packages/bokeh/server/session.py", line 98, in _needs_document_lock_wrapper
result = await result
^^^^^^^^^^^^
File "/python_env/lib/python3.11/site-packages/panel/reactive.py", line 429, in _change_coroutine
state._handle_exception(e)
File "/python_env/lib/python3.11/site-packages/panel/io/state.py", line 436, in _handle_exception
raise exception
File "/python_env/lib/python3.11/site-packages/panel/reactive.py", line 427, in _change_coroutine
self._change_event(doc)
File "/python_env/lib/python3.11/site-packages/panel/reactive.py", line 445, in _change_event
self._process_events(events)
File "/python_env/lib/python3.11/site-packages/panel/reactive.py", line 384, in _process_events
self.param.update(**self_events)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 1895, in update
setattr(self_or_cls, k, v)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 367, in _f
instance_param.__set__(obj, val)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 369, in _f
return f(self, obj, val)
^^^^^^^^^^^^^^^^^
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 1201, in __set__
self._validate(val)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 1350, in _validate
self._validate_regex(val, self.regex)
File "/python_env/lib/python3.11/site-packages/param/parameterized.py", line 1338, in _validate_regex
raise ValueError("String parameter %r value %r does not match regex %r."
ValueError: String parameter 'pathname' value 'srcdoc' does not match regex '^$|[\\/].*$'.
```
#### ALL software version info
- python=3.11.5
- bokeh=3.2.2
- panel=1.2.2
- param=1.13.0
- fastapi=0.103.1
- uvicorn=0.23.2
| I experience the same issue when trying to figure out the best way to create a Panel Playground based on Pyodide.
```python
import param
import panel as pn
from pathlib import Path
import base64
html = (Path(__file__).parent/"script.html").read_text(encoding="utf8")
html = base64.b64encode(html.encode("ascii")).decode("ascii")
pn.extension()
class CustomComponent(pn.reactive.ReactiveHTML):
_template = f"""<iframe id="iframe_el" style="height:100%;width:100%"></iframe>
"""
html = param.String(html)
_scripts = {
"render": f"""
html = atob(data.html)
console.log(html)
iframe_el.srcdoc=html
"""
}
CustomComponent(width=500, height=200).servable()
```
[script.html.csv](https://github.com/holoviz/panel/files/13198649/script.html.csv)

## Workaround
If I add
```python
from panel.io.location import Location
Location.param.pathname.regex=r"^$|[\/]|srcdoc$"
```
to the top of the `code` section inside my `script.html` file, it works. | 2023-10-30T04:00:18.000 | -1.0 | [
"panel/tests/io/test_location.py::test_iframe_srcdoc_location"
] | [
"panel/tests/io/test_location.py::test_location_sync_param_update",
"panel/tests/io/test_location.py::test_location_sync_param_init_rename",
"panel/tests/io/test_location.py::test_location_unsync",
"panel/tests/io/test_location.py::test_location_sync_query_init",
"panel/tests/io/test_location.py::test_location_sync_param_init",
"panel/tests/io/test_location.py::test_location_sync_query_init_rename",
"panel/tests/io/test_location.py::test_location_sync_on_error",
"panel/tests/io/test_location.py::test_location_unsync_partial",
"panel/tests/io/test_location.py::test_location_sync_query",
"panel/tests/io/test_location.py::test_location_update_query",
"panel/tests/io/test_location.py::test_location_sync_param_init_partial",
"panel/tests/io/test_location.py::test_server_location_populate_from_request",
"panel/tests/io/test_location.py::test_location_sync_query_init_partial"
] | 5774 | "['5506']" |
ipython/ipython | ipython__ipython-11650 | f0f6cd8b8c9f74ea8b2c5e37b6132212ce661c28 | diff --git a/IPython/lib/display.py b/IPython/lib/display.py
index 73039bdb3..fe66f4f2b 100644
--- a/IPython/lib/display.py
+++ b/IPython/lib/display.py
@@ -54,6 +54,12 @@ class Audio(DisplayObject):
autoplay : bool
Set to True if the audio should immediately start playing.
Default is `False`.
+ normalize : bool
+ Whether audio should be normalized (rescaled) to the maximum possible
+ range. Default is `True`. When set to `False`, `data` must be between
+ -1 and 1 (inclusive), otherwise an error is raised.
+ Applies only when `data` is a list or array of samples; other types of
+ audio are never normalized.
Examples
--------
@@ -83,9 +89,9 @@ class Audio(DisplayObject):
"""
_read_flags = 'rb'
- def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, autoplay=False):
+ def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, autoplay=False, normalize=True):
if filename is None and url is None and data is None:
- raise ValueError("No image data found. Expecting filename, url, or data.")
+ raise ValueError("No audio data found. Expecting filename, url, or data.")
if embed is False and url is None:
raise ValueError("No url found. Expecting url when embed=False")
@@ -97,7 +103,9 @@ def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, au
super(Audio, self).__init__(data=data, url=url, filename=filename)
if self.data is not None and not isinstance(self.data, bytes):
- self.data = self._make_wav(data,rate)
+ if rate is None:
+ raise ValueError("rate must be specified when data is a numpy array or list of audio samples.")
+ self.data = Audio._make_wav(data, rate, normalize)
def reload(self):
"""Reload the raw data from file or URL."""
@@ -112,41 +120,17 @@ def reload(self):
else:
self.mimetype = "audio/wav"
- def _make_wav(self, data, rate):
+ @staticmethod
+ def _make_wav(data, rate, normalize):
""" Transform a numpy array to a PCM bytestring """
import struct
from io import BytesIO
import wave
try:
- import numpy as np
-
- data = np.array(data, dtype=float)
- if len(data.shape) == 1:
- nchan = 1
- elif len(data.shape) == 2:
- # In wave files,channels are interleaved. E.g.,
- # "L1R1L2R2..." for stereo. See
- # http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
- # for channel ordering
- nchan = data.shape[0]
- data = data.T.ravel()
- else:
- raise ValueError('Array audio input must be a 1D or 2D array')
- scaled = np.int16(data/np.max(np.abs(data))*32767).tolist()
+ scaled, nchan = Audio._validate_and_normalize_with_numpy(data, normalize)
except ImportError:
- # check that it is a "1D" list
- idata = iter(data) # fails if not an iterable
- try:
- iter(idata.next())
- raise TypeError('Only lists of mono audio are '
- 'supported if numpy is not installed')
- except TypeError:
- # this means it's not a nested list, which is what we want
- pass
- maxabsvalue = float(max([abs(x) for x in data]))
- scaled = [int(x/maxabsvalue*32767) for x in data]
- nchan = 1
+ scaled, nchan = Audio._validate_and_normalize_without_numpy(data, normalize)
fp = BytesIO()
waveobj = wave.open(fp,mode='wb')
@@ -160,6 +144,48 @@ def _make_wav(self, data, rate):
return val
+ @staticmethod
+ def _validate_and_normalize_with_numpy(data, normalize):
+ import numpy as np
+
+ data = np.array(data, dtype=float)
+ if len(data.shape) == 1:
+ nchan = 1
+ elif len(data.shape) == 2:
+ # In wave files,channels are interleaved. E.g.,
+ # "L1R1L2R2..." for stereo. See
+ # http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
+ # for channel ordering
+ nchan = data.shape[0]
+ data = data.T.ravel()
+ else:
+ raise ValueError('Array audio input must be a 1D or 2D array')
+
+ max_abs_value = np.max(np.abs(data))
+ normalization_factor = Audio._get_normalization_factor(max_abs_value, normalize)
+ scaled = np.int16(data / normalization_factor * 32767).tolist()
+ return scaled, nchan
+
+
+ @staticmethod
+ def _validate_and_normalize_without_numpy(data, normalize):
+ try:
+ max_abs_value = float(max([abs(x) for x in data]))
+ except TypeError:
+ raise TypeError('Only lists of mono audio are '
+ 'supported if numpy is not installed')
+
+ normalization_factor = Audio._get_normalization_factor(max_abs_value, normalize)
+ scaled = [int(x / normalization_factor * 32767) for x in data]
+ nchan = 1
+ return scaled, nchan
+
+ @staticmethod
+ def _get_normalization_factor(max_abs_value, normalize):
+ if not normalize and max_abs_value > 1:
+ raise ValueError('Audio data must be between -1 and 1 when normalize=False.')
+ return max_abs_value if normalize else 1
+
def _data_and_metadata(self):
"""shortcut for returning metadata with url information, if defined"""
md = {}
diff --git a/docs/source/whatsnew/pr/optional-audio-normalization.rst b/docs/source/whatsnew/pr/optional-audio-normalization.rst
new file mode 100644
index 000000000..c4bd33361
--- /dev/null
+++ b/docs/source/whatsnew/pr/optional-audio-normalization.rst
@@ -0,0 +1,7 @@
+Make audio normalization optional
+=================================
+
+Added 'normalize' argument to `IPython.display.Audio`. This argument applies
+when audio data is given as an array of samples. The default of `normalize=True`
+preserves prior behavior of normalizing the audio to the maximum possible range.
+Setting to `False` disables normalization.
\ No newline at end of file
| diff --git a/IPython/lib/tests/test_display.py b/IPython/lib/tests/test_display.py
index aa0d617ed..9854ba635 100644
--- a/IPython/lib/tests/test_display.py
+++ b/IPython/lib/tests/test_display.py
@@ -19,13 +19,17 @@
import pathlib
except ImportError:
pass
+from unittest import TestCase, mock
+import struct
+import wave
+from io import BytesIO
# Third-party imports
import nose.tools as nt
+import numpy
# Our own imports
from IPython.lib import display
-from IPython.testing.decorators import skipif_not_numpy
#-----------------------------------------------------------------------------
# Classes and functions
@@ -179,11 +183,71 @@ def test_recursive_FileLinks():
actual = actual.split('\n')
nt.assert_equal(len(actual), 2, actual)
-@skipif_not_numpy
def test_audio_from_file():
path = pjoin(dirname(__file__), 'test.wav')
display.Audio(filename=path)
+class TestAudioDataWithNumpy(TestCase):
+ def test_audio_from_numpy_array(self):
+ test_tone = get_test_tone()
+ audio = display.Audio(test_tone, rate=44100)
+ nt.assert_equal(len(read_wav(audio.data)), len(test_tone))
+
+ def test_audio_from_list(self):
+ test_tone = get_test_tone()
+ audio = display.Audio(list(test_tone), rate=44100)
+ nt.assert_equal(len(read_wav(audio.data)), len(test_tone))
+
+ def test_audio_from_numpy_array_without_rate_raises(self):
+ nt.assert_raises(ValueError, display.Audio, get_test_tone())
+
+ def test_audio_data_normalization(self):
+ expected_max_value = numpy.iinfo(numpy.int16).max
+ for scale in [1, 0.5, 2]:
+ audio = display.Audio(get_test_tone(scale), rate=44100)
+ actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
+ nt.assert_equal(actual_max_value, expected_max_value)
+
+ def test_audio_data_without_normalization(self):
+ max_int16 = numpy.iinfo(numpy.int16).max
+ for scale in [1, 0.5, 0.2]:
+ test_tone = get_test_tone(scale)
+ test_tone_max_abs = numpy.max(numpy.abs(test_tone))
+ expected_max_value = int(max_int16 * test_tone_max_abs)
+ audio = display.Audio(test_tone, rate=44100, normalize=False)
+ actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
+ nt.assert_equal(actual_max_value, expected_max_value)
+
+ def test_audio_data_without_normalization_raises_for_invalid_data(self):
+ nt.assert_raises(
+ ValueError,
+ lambda: display.Audio([1.001], rate=44100, normalize=False))
+ nt.assert_raises(
+ ValueError,
+ lambda: display.Audio([-1.001], rate=44100, normalize=False))
+
+def simulate_numpy_not_installed():
+ return mock.patch('numpy.array', mock.MagicMock(side_effect=ImportError))
+
+@simulate_numpy_not_installed()
+class TestAudioDataWithoutNumpy(TestAudioDataWithNumpy):
+ # All tests from `TestAudioDataWithNumpy` are inherited.
+
+ def test_audio_raises_for_nested_list(self):
+ stereo_signal = [list(get_test_tone())] * 2
+ nt.assert_raises(
+ TypeError,
+ lambda: display.Audio(stereo_signal, rate=44100))
+
+def get_test_tone(scale=1):
+ return numpy.sin(2 * numpy.pi * 440 * numpy.linspace(0, 1, 44100)) * scale
+
+def read_wav(data):
+ with wave.open(BytesIO(data)) as wave_file:
+ wave_data = wave_file.readframes(wave_file.getnframes())
+ num_samples = wave_file.getnframes() * wave_file.getnchannels()
+ return struct.unpack('<%sh' % num_samples, wave_data)
+
def test_code_from_file():
c = display.Code(filename=__file__)
assert c._repr_html_().startswith('<style>')
| Volume normalization in IPython.display.Audio should be optional
I am manipulating audio using numpy with IPython Notebook and want to use IPython.display.Audio for listening to the numpy arrays. Unfortunately, auto-normalization tampers with the results. Example:
```
# Generate a sound
import IPython
import numpy as np
framerate = 44100
t = np.linspace(0,5,framerate*5)
tone = np.sin(2*np.pi*220*t)
antitone = np.sin(2*np.pi*220*t + np.pi)
IPython.display.Audio(tone+antitone, rate=framerate)
```
Adding a sine wave to itself shifted by 180 degrees should give total silence. Instead, auto-normalization amplifies the floating-point errors. The problem is in the IPython.lib.display.Audio `_make_wav` method, which always normalizes the numpy array (see the 'scaled' variable). I think we should have a `normalize` keyword argument, so that Audio can be used for audio analysis. Something like:
```
Audio(tone+antitone, rate=framerate, normalize=False)
```
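As a sketch of the requested behavior, the example above would then become the following. Note that the `normalize` keyword is the proposed addition (implemented by the patch in this row), not part of the API at the time the issue was filed.

```python
import IPython
import numpy as np

framerate = 44100
t = np.linspace(0, 5, framerate * 5)
tone = np.sin(2 * np.pi * 220 * t)
antitone = np.sin(2 * np.pi * 220 * t + np.pi)

# With normalization disabled, the near-zero residual is written out
# as-is instead of being scaled up to the maximum possible range.
IPython.display.Audio(tone + antitone, rate=framerate, normalize=False)
```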
| 2019-03-15 16:07:13+00:00 | -1.0 | [
"IPython/lib/tests/test_display.py::TestAudioDataWithNumpy::test_audio_data_without_normalization",
"IPython/lib/tests/test_display.py::TestAudioDataWithNumpy::test_audio_data_without_normalization_raises_for_invalid_data",
"IPython/lib/tests/test_display.py::TestAudioDataWithNumpy::test_audio_from_numpy_array_without_rate_raises",
"IPython/lib/tests/test_display.py::TestAudioDataWithoutNumpy::test_audio_data_normalization",
"IPython/lib/tests/test_display.py::TestAudioDataWithoutNumpy::test_audio_data_without_normalization",
"IPython/lib/tests/test_display.py::TestAudioDataWithoutNumpy::test_audio_data_without_normalization_raises_for_invalid_data",
"IPython/lib/tests/test_display.py::TestAudioDataWithoutNumpy::test_audio_from_list",
"IPython/lib/tests/test_display.py::TestAudioDataWithoutNumpy::test_audio_from_numpy_array",
"IPython/lib/tests/test_display.py::TestAudioDataWithoutNumpy::test_audio_from_numpy_array_without_rate_raises",
"IPython/lib/tests/test_display.py::TestAudioDataWithoutNumpy::test_audio_raises_for_nested_list"
] | [
"IPython/lib/tests/test_display.py::test_instantiation_FileLink",
"IPython/lib/tests/test_display.py::test_warning_on_non_existant_path_FileLink",
"IPython/lib/tests/test_display.py::test_existing_path_FileLink",
"IPython/lib/tests/test_display.py::test_existing_path_FileLink_repr",
"IPython/lib/tests/test_display.py::test_error_on_directory_to_FileLink",
"IPython/lib/tests/test_display.py::test_instantiation_FileLinks",
"IPython/lib/tests/test_display.py::test_warning_on_non_existant_path_FileLinks",
"IPython/lib/tests/test_display.py::test_existing_path_FileLinks",
"IPython/lib/tests/test_display.py::test_existing_path_FileLinks_alt_formatter",
"IPython/lib/tests/test_display.py::test_existing_path_FileLinks_repr",
"IPython/lib/tests/test_display.py::test_existing_path_FileLinks_repr_alt_formatter",
"IPython/lib/tests/test_display.py::test_error_on_file_to_FileLinks",
"IPython/lib/tests/test_display.py::test_recursive_FileLinks",
"IPython/lib/tests/test_display.py::test_audio_from_file",
"IPython/lib/tests/test_display.py::TestAudioDataWithNumpy::test_audio_data_normalization",
"IPython/lib/tests/test_display.py::TestAudioDataWithNumpy::test_audio_from_list",
"IPython/lib/tests/test_display.py::TestAudioDataWithNumpy::test_audio_from_numpy_array",
"IPython/lib/tests/test_display.py::test_code_from_file"
] | |||
marcosschroh/dataclasses-avroschema | marcosschroh__dataclasses-avroschema-253 | d2334ff743df40cf11ad427cab45de9b714c6920 | diff --git a/dataclasses_avroschema/exceptions.py b/dataclasses_avroschema/exceptions.py
index f694907..079718a 100644
--- a/dataclasses_avroschema/exceptions.py
+++ b/dataclasses_avroschema/exceptions.py
@@ -1,22 +1,6 @@
import typing
-class NameSpaceRequiredException(Exception):
- def __init__(self, field_type: typing.Any, field_name: str) -> None:
- self.field_type = field_type
- self.field_name = field_name
-
- def __repr__(self) -> str:
- class_name = self.__class__.__name__
- return f"{class_name} {self.field_name},{self.field_type}"
-
- def __str__(self) -> str:
- return ( # pragma: no cover
- f"Required namespace in Meta for type {self.field_type}. "
- f"The field {self.field_name} is using an exiting type"
- )
-
-
class InvalidMap(Exception):
def __init__(self, field_name: str, key_type: typing.Any) -> None:
self.field_name = field_name
diff --git a/dataclasses_avroschema/fields.py b/dataclasses_avroschema/fields.py
index 74a22a9..fb4d62a 100644
--- a/dataclasses_avroschema/fields.py
+++ b/dataclasses_avroschema/fields.py
@@ -20,7 +20,7 @@ from pytz import utc
from dataclasses_avroschema import schema_generator, serialization, types, utils
from . import field_utils
-from .exceptions import InvalidMap, NameSpaceRequiredException
+from .exceptions import InvalidMap
from .types import JsonDict
PY_VER = sys.version_info
@@ -483,8 +483,9 @@ class EnumField(BaseField):
else:
namespace = metadata.get("namespace")
if namespace is None:
- raise NameSpaceRequiredException(field_type=self.type, field_name=name)
- return f"{namespace}.{name}"
+ return name
+ else:
+ return f"{namespace}.{name}"
def get_default_value(self) -> typing.Union[str, dataclasses._MISSING_TYPE, None]:
if self.default == types.MissingSentinel:
@@ -770,8 +771,9 @@ class RecordField(BaseField):
record_type["name"] = name
else:
if metadata.namespace is None:
- raise NameSpaceRequiredException(field_type=self.type, field_name=self.name)
- record_type = f"{metadata.namespace}.{name}"
+ record_type = name
+ else:
+ record_type = f"{metadata.namespace}.{name}"
if self.default is None:
return [field_utils.NULL, record_type]
diff --git a/docs/complex_types.md b/docs/complex_types.md
index 8e7f9b0..54cb245 100644
--- a/docs/complex_types.md
+++ b/docs/complex_types.md
@@ -71,7 +71,8 @@ User.avro_schema()
### Repeated Enums
-Sometimes we have cases where an `Enum` is used more than once with a particular class, for those cases, you `MUST` define the namespace in order to generate a valid `avro schema`
+Sometimes we have cases where an `Enum` is used more than once with a particular class, for those cases the same `type` is used in order to generate a valid schema.
+It is a good practice but *NOT* necessary to define the `namespace` on the repeated `type`.
```python
import enum
@@ -123,7 +124,7 @@ resulting in
"name": "optional_distance",
"type": [
"null",
- "trip.TripDistance"
+ "trip.TripDistance" // using the namespace and the TripDistance type
],
"default": null
}
@@ -132,9 +133,6 @@ resulting in
}
```
-!!! warning
- If you do not specify the `namespace` in the `Enum` the exception `NameSpaceRequiredException` is raised
-
## Arrays
```python title="Array example"
diff --git a/docs/good_practices.md b/docs/good_practices.md
index 5a8f5e2..c632df3 100644
--- a/docs/good_practices.md
+++ b/docs/good_practices.md
@@ -1,6 +1,6 @@
-## Streaming
+# Streaming
-### Schema server and AvroModel
+## Schema server and AvroModel
First, let's clarify what a schema server is: It is a `central place/repository` that contains schemas with formats like `avro`, `json` or `protobuf`, with the purpose of exposing them through an `API`, so applications can access them and `serialize/deserialize` events. The schema server could have a `RESTful` interface so tasks like `create`, `delete` `get` schemas can be performed easily.
@@ -29,8 +29,7 @@ class User(AvroModel):
The purpose of the `schema_id` is to give a fast notion what the model is representing. Also, could be used as `documentation`
-
-### Include event metadata
+## Include event metadata
`avro schemas` are used widely in `streaming` to `serialize` events, and with `dataclasses-avroschemas` it is straigtforward. Once
that you have the event, it is a good practice to also add the `event metadata` at the moment of `producing` so `consumers` will know what to do.
@@ -56,7 +55,7 @@ class User(AvroModel):
money: float = 100.3
class Meta:
- schema_id = "https://my-schema-server/users/schema.avsc" # or in a Concluent way: https://my-schema-server/schemas/ids/{int: id}
+ schema_id = "https://my-schema-server/users/schema.avsc" # or in a Confluent way: https://my-schema-server/schemas/ids/{int: id}
async def produce():
@@ -80,4 +79,14 @@ async def produce():
if __name__ == "__main__":
asyncio.run(produce)
-```
\ No newline at end of file
+```
+
+## Define Namespaces
+
+When there are types that are used more than once in a schema, for example `records` and `enums`, it is a good practice to define a `namespace` for the repeated type.
+This will allow you to identify the `types` more easily, especially if you have all the schemas in a `schema server` like `confluent`.
+
+Use cases:
+
+- [Reusing types with records](https://marcosschroh.github.io/dataclasses-avroschema/schema_relationships/#avoid-name-collision-in-multiple-relationships)
+- [Reusing types with enums](https://marcosschroh.github.io/dataclasses-avroschema/complex_types/#repeated-enums)
diff --git a/docs/model_generator.md b/docs/model_generator.md
index cee7009..acec709 100644
--- a/docs/model_generator.md
+++ b/docs/model_generator.md
@@ -11,7 +11,7 @@ The rendered result is a string that contains the proper identation, so the resu
In future releases it will be possible to generate models for other programming langagues like `java` and `rust`
!!! note
- You can also use [dc-avro](https://github.com/marcosschroh/dc-avro)d to generate the models from the command line
+ You can also use [dc-avro](https://github.com/marcosschroh/dc-avro) to generate the models from the command line
## Usage
diff --git a/docs/schema_relationships.md b/docs/schema_relationships.md
index 9f4bbba..39cf3df 100644
--- a/docs/schema_relationships.md
+++ b/docs/schema_relationships.md
@@ -278,11 +278,10 @@ User.avro_schema()
## Avoid name collision in multiple relationships
-Sometimes we have relationships where a class is related more than once with a particular class,
-and the name for the nested schemas must be different, otherwise we will generate an invalid `avro schema`.
-For those cases, you *MUST* define the `namespace`.
+Sometimes we have relationships where a class is related more than once with a particular class.
+In those cases, the predefined type is reused in order to generate a valid schema. It is a good practice but *NOT* necessary to define the `namespace` on the repeated `type`.
-```python title="Avoiding name collision example"
+```python title="Repetead types"
from dataclasses import dataclass
from datetime import datetime
import json
@@ -296,7 +295,7 @@ class Location(AvroModel):
longitude: float
class Meta:
- namespace = "types.location_type"
+        namespace = "types.location_type" # Good practice to use `namespaces`
@dataclass
class Trip(AvroModel):
@@ -333,7 +332,7 @@ Trip.avro_schema()
"type": {"type": "long", "logicalType": "timestamp-millis"}
},
{
- "name": "finish_location", "type": "types.location_type.Location" // using the namespace
+ "name": "finish_location", "type": "types.location_type.Location" // using the namespace and the Location type
}
],
"doc": "Trip(start_time: datetime.datetime, start_location: __main__.Location, finish_time: datetime.datetime, finish_location: __main__.Location)"
| diff --git a/tests/schemas/test_fastavro_paser_schema.py b/tests/schemas/test_fastavro_paser_schema.py
index 81ae619..5c09b80 100644
--- a/tests/schemas/test_fastavro_paser_schema.py
+++ b/tests/schemas/test_fastavro_paser_schema.py
@@ -152,6 +152,19 @@ def test_one_to_one_repeated_schema():
assert Trip.fake()
+def test_repeated_schema_without_namespace():
+ class Bus(AvroModel):
+ "A Bus"
+ engine_name: str
+
+ class UnionSchema(AvroModel):
+ "Some Unions"
+ bus_one: Bus
+ bus_two: Bus
+
+ parse_schema(UnionSchema.avro_schema_to_python())
+
+
def test_one_to_one_repeated_schema_in_array():
"""
Test relationship one-to-one with more than once schema
diff --git a/tests/schemas/test_schema.py b/tests/schemas/test_schema.py
index bde9754..9582de6 100644
--- a/tests/schemas/test_schema.py
+++ b/tests/schemas/test_schema.py
@@ -5,7 +5,7 @@ from dataclasses import dataclass
import pytest
from fastavro.validation import ValidationError
-from dataclasses_avroschema import AvroModel, exceptions
+from dataclasses_avroschema import AvroModel
from dataclasses_avroschema.schema_definition import BaseSchemaDefinition
from dataclasses_avroschema.types import JsonDict
@@ -123,20 +123,6 @@ def test_not_implementd_methods():
assert msg == str(excinfo.value)
-def test_namespace_required():
- class Bus(AvroModel):
- "A Bus"
- engine_name: str
-
- class UnionSchema(AvroModel):
- "Some Unions"
- bus_one: Bus
- bus_two: Bus
-
- with pytest.raises(exceptions.NameSpaceRequiredException):
- assert UnionSchema.avro_schema()
-
-
def test_inherit_dataclass_missing_docs():
@dataclass
class BaseUser:
diff --git a/tests/schemas/test_schema_with_complex_types.py b/tests/schemas/test_schema_with_complex_types.py
index 1fa4f1b..d8d6755 100644
--- a/tests/schemas/test_schema_with_complex_types.py
+++ b/tests/schemas/test_schema_with_complex_types.py
@@ -9,7 +9,7 @@ import uuid
import pytest
from fastavro import parse_schema
-from dataclasses_avroschema import AvroModel, exceptions
+from dataclasses_avroschema import AvroModel
from dataclasses_avroschema.types import JsonDict
PY_VER = sys.version_info
@@ -155,7 +155,7 @@ def test_schema_with_new_unions_defaults_syntax(default_union_schema: JsonDict)
assert User.avro_schema() == json.dumps(default_union_schema)
-def test_enum_namespace_required() -> None:
+def test_repeated_enum_without_namespace() -> None:
class UserType(enum.Enum):
BASIC = "Basic"
PREMIUM = "Premium"
@@ -165,8 +165,7 @@ def test_enum_namespace_required() -> None:
user_type: UserType
user_type_optional: typing.Optional[UserType]
- with pytest.raises(exceptions.NameSpaceRequiredException):
- User.avro_schema()
+ parse_schema(User.avro_schema_to_python())
# This is to explicitly test the behavior for a typing.Optional[T] field with no default
| NameSpaceRequiredException when reusing child class
Hi there! First off, thank you for the fantastic library.
I'm running into the following issue in a migration to dataclasses-avroschema:
```python
from dataclasses_avroschema.avrodantic import AvroBaseModel
class ChildA(AvroBaseModel):
id: int
class ChildB(AvroBaseModel):
id: int
class Parent(AvroBaseModel):
x: ChildA | ChildB
y: ChildA | ChildB
class Meta:
namespace = "com.example"
d = Parent(x=ChildA(id=42), y=ChildB(id=43))
print(d.serialize(serialization_type="avro-json"))
```
```
File "C:\Python3.11\Lib\site-packages\dataclasses_avroschema\fields.py", line 773, in get_avro_type
raise NameSpaceRequiredException(field_type=self.type, field_name=self.name)
dataclasses_avroschema.exceptions.NameSpaceRequiredException: Required namespace in Meta for type <class '__main__.ChildA'>. The field y is using an exiting type
```
Removing `y` fixes the issue and correctly produces:
```json
{"x": {"com.example.ChildA": {"id": 42}}}
```
The problem seems to be that `exist_type` turns `True`, but I don't understand enough of the codebase to know what the correct fix is. | 2023-03-06 14:04:44+00:00 | -1.0 | [
"tests/schemas/test_fastavro_paser_schema.py::test_repeated_schema_without_namespace",
"tests/schemas/test_schema_with_complex_types.py::test_repeated_enum_without_namespace"
] | [
"tests/schemas/test_fastavro_paser_schema.py::test_minimal_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_with_field_metadata",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_with_extra_avro_attrs",
"tests/schemas/test_fastavro_paser_schema.py::test_advance_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_advance_schema_with_defaults",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_repeated_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_repeated_schema_in_array",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_repeated_schema_in_map",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_repeated_schema_in_array_and_map",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_with_map_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_self_relationship",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_self_reference_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_self_reference_map_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_logical_types_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_logical_micro_types_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_with_union_types",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_with_union_record_types",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_array_with_union_types",
"tests/schemas/test_fastavro_paser_schema.py::test_namespaces",
"tests/schemas/test_fastavro_paser_schema.py::test_use_of_same_type_in_nested_list",
"tests/schemas/test_fastavro_paser_schema.py::test_two_different_child_records",
"tests/schemas/test_fastavro_paser_schema.py::test_nested_schemas_splitted",
"tests/schemas/test_fastavro_paser_schema.py::test_nested_scheamas_splitted_with_intermediates",
"tests/schemas/test_schema.py::test_total_schema_fields_from_class",
"tests/schemas/test_schema.py::test_total_schema_fields_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_field_metadata",
"tests/schemas/test_schema.py::test_schema_render_from_class",
"tests/schemas/test_schema.py::test_schema_render_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_doc",
"tests/schemas/test_schema.py::test_schema_render_from_instance_with_doc",
"tests/schemas/test_schema.py::test_schema_doc_from_meta",
"tests/schemas/test_schema.py::test_schema_cached",
"tests/schemas/test_schema.py::test_extra_avro_attributes",
"tests/schemas/test_schema.py::test_class_empty_metaclass",
"tests/schemas/test_schema.py::test_invalid_schema_type",
"tests/schemas/test_schema.py::test_not_implementd_methods",
"tests/schemas/test_schema.py::test_inherit_dataclass_missing_docs",
"tests/schemas/test_schema.py::test_get_fields",
"tests/schemas/test_schema.py::test_schema_name_from_relationship",
"tests/schemas/test_schema.py::test_alias_from_relationship",
"tests/schemas/test_schema.py::test_validate",
"tests/schemas/test_schema.py::test_get_enum_type_map",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_unions",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_sub_record",
"tests/schemas/test_schema.py::test_deserialize_complex_types",
"tests/schemas/test_schema.py::test_deserialize_complex_types_invalid_enum_instance",
"tests/schemas/test_schema.py::test_parse_obj",
"tests/schemas/test_schema.py::test_avro_schema_to_python_method_with_inheritance",
"tests/schemas/test_schema.py::test_avro_schema_method_with_inheritance",
"tests/schemas/test_schema_with_complex_types.py::test_schema_with_complex_types",
"tests/schemas/test_schema_with_complex_types.py::test_schema_with_complex_types_and_defaults",
"tests/schemas/test_schema_with_complex_types.py::test_schema_with_unions_type",
"tests/schemas/test_schema_with_complex_types.py::test_schema_with_unions_defaults",
"tests/schemas/test_schema_with_complex_types.py::test_schema_typing_optional_behavior"
] | |||
repobee/repobee | repobee__repobee-201 | 771ab4e3b32ee13edf575b442f27f7c840db2dff | diff --git a/repobee/github_api.py b/repobee/github_api.py
index 5a698af..a1d4585 100644
--- a/repobee/github_api.py
+++ b/repobee/github_api.py
@@ -21,6 +21,7 @@ import github
from repobee import exception
from repobee import tuples
from repobee import util
+from repobee import apimeta
REQUIRED_OAUTH_SCOPES = {"admin:org", "repo"}
@@ -95,7 +96,7 @@ def _try_api_request(ignore_statuses: Optional[Iterable[int]] = None):
)
-class GitHubAPI:
+class GitHubAPI(apimeta.API):
"""A highly specialized GitHub API class for repobee. The API is
affiliated both with an organization, and with the whole GitHub
instance. Almost all operations take place on the target
@@ -136,7 +137,7 @@ class GitHubAPI:
def token(self):
return self._token
- def get_teams_in(
+ def _get_teams_in(
self, team_names: Iterable[str]
) -> Generator[github.Team.Team, None, None]:
"""Get all teams that match any team name in the team_names iterable.
@@ -163,7 +164,7 @@ class GitHubAPI:
team_names: A list of team names for teams to be deleted.
"""
deleted = set() # only for logging
- for team in self.get_teams_in(team_names):
+ for team in self._get_teams_in(team_names):
team.delete()
deleted.add(team.name)
LOGGER.info("deleted team {}".format(team.name))
@@ -220,7 +221,7 @@ class GitHubAPI:
for team in [team for team in teams if member_lists[team.name]]:
self._ensure_members_in_team(team, member_lists[team.name])
- return list(self.get_teams_in(set(member_lists.keys())))
+ return list(self._get_teams_in(set(member_lists.keys())))
def _ensure_teams_exist(
self, team_names: Iterable[str], permission: str = "push"
@@ -280,9 +281,9 @@ class GitHubAPI:
", ".join(existing_members), team.name
)
)
- self.add_to_team(missing_members, team)
+ self._add_to_team(missing_members, team)
- def add_to_team(self, members: Iterable[str], team: github.Team.Team):
+ def _add_to_team(self, members: Iterable[str], team: github.Team.Team):
"""Add members to a team.
Args:
@@ -294,18 +295,18 @@ class GitHubAPI:
for user in users:
team.add_membership(user)
- def create_repos(self, repo_infos: Iterable[tuples.Repo]):
+ def create_repos(self, repos: Iterable[tuples.Repo]):
"""Create repositories in the given organization according to the Repos.
Repos that already exist are skipped.
Args:
- repo_infos: An iterable of Repo namedtuples.
+ repos: An iterable of Repo namedtuples.
Returns:
A list of urls to all repos corresponding to the Repos.
"""
repo_urls = []
- for info in repo_infos:
+ for info in repos:
created = False
with _try_api_request(ignore_statuses=[422]):
kwargs = dict(
@@ -489,7 +490,7 @@ class GitHubAPI:
issue: An an optional Issue tuple to override the default issue.
"""
issue = issue or DEFAULT_REVIEW_ISSUE
- for team, repo in self.add_repos_to_teams(team_to_repos):
+ for team, repo in self._add_repos_to_teams(team_to_repos):
# TODO team.get_members() api request is a bit redundant, it
# can be solved in a more efficient way by passing in the
# allocations
@@ -527,7 +528,7 @@ class GitHubAPI:
assigned_repos is a :py:class:`~repobee.tuples.Review`.
"""
reviews = collections.defaultdict(list)
- teams = self.get_teams_in(review_team_names)
+ teams = self._get_teams_in(review_team_names)
for team in teams:
with _try_api_request():
LOGGER.info("processing {}".format(team.name))
@@ -557,7 +558,7 @@ class GitHubAPI:
return reviews
- def add_repos_to_teams(
+ def _add_repos_to_teams(
self, team_to_repos: Mapping[str, Iterable[str]]
) -> Generator[
Tuple[github.Team.Team, github.Repository.Repository], None, None
| diff --git a/tests/test_github_api.py b/tests/test_github_api.py
index f03a05a..9976d19 100644
--- a/tests/test_github_api.py
+++ b/tests/test_github_api.py
@@ -668,7 +668,7 @@ class TestGetIssues:
@pytest.fixture
def team_to_repos(api, no_repos, organization):
- """Create a team_to_repos mapping for use in add_repos_to_teams, anc create
+ """Create a team_to_repos mapping for use in _add_repos_to_teams, anc create
each team and repo. Return the team_to_repos mapping.
"""
num_teams = 10
@@ -718,7 +718,7 @@ class TestAddReposToTeams:
expected_tups = sorted(team_to_repos.items())
# act
- result = list(api.add_repos_to_teams(team_to_repos))
+ result = list(api._add_repos_to_teams(team_to_repos))
result.sort(key=lambda tup: tup[0].name)
# assert
| Adapt github_api.GitHubAPI to make use of the apimeta.API class | 2019-05-26 20:29:54+00:00 | -1.0 | [
"tests/test_github_api.py::TestAddReposToTeams::test_happy_path"
] | [
"tests/test_github_api.py::TestEnsureTeamsAndMembers::test_no_previous_teams",
"tests/test_github_api.py::TestEnsureTeamsAndMembers::test_all_teams_exist_but_without_members",
"tests/test_github_api.py::TestEnsureTeamsAndMembers::test_raises_on_non_422_exception[unexpected_exc0]",
"tests/test_github_api.py::TestEnsureTeamsAndMembers::test_raises_on_non_422_exception[unexpected_exc1]",
"tests/test_github_api.py::TestEnsureTeamsAndMembers::test_raises_on_non_422_exception[unexpected_exc2]",
"tests/test_github_api.py::TestEnsureTeamsAndMembers::test_running_twice_has_no_ill_effects",
"tests/test_github_api.py::TestCreateRepos::test_creates_correct_repos",
"tests/test_github_api.py::TestCreateRepos::test_skips_existing_repos",
"tests/test_github_api.py::TestCreateRepos::test_raises_on_unexpected_error[unexpected_exception0]",
"tests/test_github_api.py::TestCreateRepos::test_raises_on_unexpected_error[unexpected_exception1]",
"tests/test_github_api.py::TestCreateRepos::test_raises_on_unexpected_error[unexpected_exception2]",
"tests/test_github_api.py::TestCreateRepos::test_returns_all_urls",
"tests/test_github_api.py::TestCreateRepos::test_create_repos_without_team_id",
"tests/test_github_api.py::TestGetRepoUrls::test_with_token_without_user",
"tests/test_github_api.py::TestGetRepoUrls::test_with_token_and_user",
"tests/test_github_api.py::TestGetRepoUrls::test_with_students",
"tests/test_github_api.py::TestOpenIssue::test_on_existing_repos",
"tests/test_github_api.py::TestOpenIssue::test_on_some_non_existing_repos",
"tests/test_github_api.py::TestOpenIssue::test_no_crash_when_no_repos_are_found",
"tests/test_github_api.py::TestCloseIssue::test_closes_correct_issues",
"tests/test_github_api.py::TestCloseIssue::test_no_crash_if_no_repos_found",
"tests/test_github_api.py::TestCloseIssue::test_no_crash_if_no_issues_found",
"tests/test_github_api.py::TestGetIssues::test_get_all_open_issues",
"tests/test_github_api.py::TestGetIssues::test_get_all_closed_issues",
"tests/test_github_api.py::TestGetIssues::test_get_issues_when_one_repo_doesnt_exist",
"tests/test_github_api.py::TestGetIssues::test_get_open_issues_by_regex",
"tests/test_github_api.py::TestAddReposToReviewTeams::test_with_default_issue",
"tests/test_github_api.py::TestDeleteTeams::test_delete_non_existing_teams_does_not_crash",
"tests/test_github_api.py::TestDeleteTeams::test_delete_existing_teams",
"tests/test_github_api.py::TestVerifySettings::test_happy_path",
"tests/test_github_api.py::TestVerifySettings::test_empty_token_raises_bad_credentials",
"tests/test_github_api.py::TestVerifySettings::test_incorrect_info_raises_not_found_error[get_user]",
"tests/test_github_api.py::TestVerifySettings::test_incorrect_info_raises_not_found_error[get_organization]",
"tests/test_github_api.py::TestVerifySettings::test_bad_token_scope_raises",
"tests/test_github_api.py::TestVerifySettings::test_not_owner_raises",
"tests/test_github_api.py::TestVerifySettings::test_raises_unexpected_exception_on_unexpected_status",
"tests/test_github_api.py::TestVerifySettings::test_none_user_raises",
"tests/test_github_api.py::TestVerifySettings::test_mismatching_user_login_raises",
"tests/test_github_api.py::TestGetPeerReviewProgress::test_nothing_returns",
"tests/test_github_api.py::TestGetPeerReviewProgress::test_with_review_teams_but_no_repos"
] | |||
Pylons/pyramid | Pylons__pyramid-3413 | 133db09d179c3f5afe7e02dc13ab6687517db5a1 | diff --git a/src/pyramid/interfaces.py b/src/pyramid/interfaces.py
--- a/src/pyramid/interfaces.py
+++ b/src/pyramid/interfaces.py
@@ -1084,21 +1084,27 @@ class ISession(IDict):
""" An interface representing a session (a web session object,
usually accessed via ``request.session``.
- Keys and values of a session must be pickleable.
+ Keys and values of a session must be JSON-serializable.
.. warning::
- In :app:`Pyramid` 2.0 the session will only be required to support
- types that can be serialized using JSON. It's recommended to switch any
- session implementations to support only JSON and to only store primitive
- types in sessions. See :ref:`pickle_session_deprecation` for more
- information about why this change is being made.
+ In :app:`Pyramid` 2.0 the session was changed to only be required to
+ support types that can be serialized using JSON. It's recommended to
+ switch any session implementations to support only JSON and to only
+ store primitive types in sessions. See
+ :ref:`pickle_session_deprecation` for more information about why this
+ change was made.
.. versionchanged:: 1.9
- Sessions are no longer required to implement ``get_csrf_token`` and
- ``new_csrf_token``. CSRF token support was moved to the pluggable
- :class:`pyramid.interfaces.ICSRFStoragePolicy` configuration hook.
+ Sessions are no longer required to implement ``get_csrf_token`` and
+ ``new_csrf_token``. CSRF token support was moved to the pluggable
+ :class:`pyramid.interfaces.ICSRFStoragePolicy` configuration hook.
+
+ .. versionchanged:: 2.0
+
+ Sessions now need to be JSON-serializable. This is more strict than
+ the previous requirement of pickleable objects.
"""
diff --git a/src/pyramid/session.py b/src/pyramid/session.py
--- a/src/pyramid/session.py
+++ b/src/pyramid/session.py
@@ -1,7 +1,6 @@
import binascii
import os
import time
-import warnings
from zope.deprecation import deprecated
from zope.interface import implementer
@@ -350,8 +349,6 @@ def SignedCookieSessionFactory(
serializer=None,
):
"""
- .. versionadded:: 1.5
-
Configure a :term:`session factory` which will provide signed
cookie-based sessions. The return value of this
function is a :term:`session factory`, which may be provided as
@@ -441,33 +438,29 @@ def SignedCookieSessionFactory(
method should accept bytes and return a Python object. The ``dumps``
method should accept a Python object and return bytes. A ``ValueError``
should be raised for malformed inputs. If a serializer is not passed,
- the :class:`pyramid.session.PickleSerializer` serializer will be used.
+ the :class:`pyramid.session.JSONSerializer` serializer will be used.
.. warning::
- In :app:`Pyramid` 2.0 the default ``serializer`` option will change to
+ In :app:`Pyramid` 2.0 the default ``serializer`` option changed to
use :class:`pyramid.session.JSONSerializer`. See
:ref:`pickle_session_deprecation` for more information about why this
- change is being made.
+ change was made.
.. versionadded: 1.5a3
.. versionchanged: 1.10
- Added the ``samesite`` option and made the default ``Lax``.
+ Added the ``samesite`` option and made the default ``Lax``.
+
+ .. versionchanged: 2.0
+
+ Changed the default ``serializer`` to be an instance of
+ :class:`pyramid.session.JSONSerializer`.
"""
if serializer is None:
- serializer = PickleSerializer()
- warnings.warn(
- 'The default pickle serializer is deprecated as of Pyramid 1.9 '
- 'and it will be changed to use pyramid.session.JSONSerializer in '
- 'version 2.0. Explicitly set the serializer to avoid future '
- 'incompatibilities. See "Upcoming Changes to ISession in '
- 'Pyramid 2.0" for more information about this change.',
- DeprecationWarning,
- stacklevel=1,
- )
+ serializer = JSONSerializer()
signed_serializer = SignedSerializer(
secret, salt, hashalg, serializer=serializer
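For illustration, a minimal sketch of how an application would choose a serializer after this change; `'seekrit'` is a placeholder secret, and `PickleSerializer` is assumed to remain importable for backwards compatibility, as the retained serializer tests in this row suggest:

```python
from pyramid.config import Configurator
from pyramid.session import (
    JSONSerializer,
    PickleSerializer,
    SignedCookieSessionFactory,
)

# JSON is now the default serializer; passing it explicitly just
# documents the intent.
json_factory = SignedCookieSessionFactory('seekrit', serializer=JSONSerializer())

# Opting back into pickle restores the old behavior (arbitrary
# pickleable objects), with the security caveats that motivated
# the switch to JSON.
pickle_factory = SignedCookieSessionFactory('seekrit', serializer=PickleSerializer())

config = Configurator()
config.set_session_factory(json_factory)
```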
| diff --git a/tests/test_session.py b/tests/test_session.py
--- a/tests/test_session.py
+++ b/tests/test_session.py
@@ -364,10 +364,10 @@ def _serialize(self, value, salt=b'pyramid.session.', hashalg='sha512'):
import base64
import hashlib
import hmac
- import pickle
+ import json
digestmod = lambda: hashlib.new(hashalg)
- cstruct = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
+ cstruct = json.dumps(value).encode('utf-8')
sig = hmac.new(salt + b'secret', cstruct, digestmod).digest()
return base64.urlsafe_b64encode(sig + cstruct).rstrip(b'=')
@@ -505,24 +505,6 @@ def test_very_long_key(self):
self.assertEqual(result, None)
self.assertTrue('Set-Cookie' in dict(response.headerlist))
- def test_bad_pickle(self):
- import base64
- import hashlib
- import hmac
-
- digestmod = lambda: hashlib.new('sha512')
- # generated from dumping an object that cannot be found anymore, eg:
- # class Foo: pass
- # print(pickle.dumps(Foo()))
- cstruct = b'(i__main__\nFoo\np0\n(dp1\nb.'
- sig = hmac.new(b'pyramid.session.secret', cstruct, digestmod).digest()
- cookieval = base64.urlsafe_b64encode(sig + cstruct).rstrip(b'=')
-
- request = testing.DummyRequest()
- request.cookies['session'] = cookieval
- session = self._makeOne(request, secret='secret')
- self.assertEqual(session, {})
-
class Test_manage_accessed(unittest.TestCase):
def _makeOne(self, wrapped):
| 2.0 feature request: Require that sessions are JSON serializable
Currently the `ISession` interface requires that the keys and values of a session must be pickleable.
My proposal is to drop that requirement and simply state that all keys and values should be JSON-serializable. We should still give users the option to use the pickle serializer, but pick a safer default.
---
Cookie-based sessions right now use a PickleSerializer by default, and the cookie is then protected using a secret and HMAC hashes. However, if an attacker was able to get the secret, they could potentially exploit unpickling to execute code in the context of the running Python process.
| If JSON supported datetime this would be a no brainer for me.
Or... https://pypi.python.org/pypi/cbor2
@mmerickel there's gotta be a way to solve that problem though, with custom serializer/deserializer perhaps?
@ztane I don't want to add a 3rd party library to Pyramid core.
@bertjwregeer deserialization is where things get shady. On the serialization side you basically end up marshaling a custom syntax into a string, or go even further and instead of a straight json dump you instead iterate through and build out private structures containing the type information. This is all possible but then there's a fine line between this and basically re-inventing pickle.
I'm not ruling anything out right now but I think it'd ideally be something general purpose, pluggable with custom types and not in pyramid's core. This way people could use it in their own session factories and they could add serializers for types like datetime and any other custom objects.
I was suggesting we support only the lowest common denominator. Basically `ISession` would state that all content to be stored in the session is to be something that can be JSON serialized. I was also thinking we could make an exception for `datetime` (although I don't use those in sessions...)
If you want to have an `ISession` that allows for complex objects it can still be implemented of course, just like you can currently take `ISession` and use a JSON serializer instead.
I was simply asking for the default to be changed.
Yeah... that's not actually helpful for creating a session ecosystem though. Addons need to be able to use the session without worrying about the specific implementation. That's why pickle is so great right now - addons don't need to care about the serialization format and they can throw datetimes in there and get them back. If we switch to JSON in the way you suggested, they can no longer know what they will get back for any non-primitive types. The addon will need to ask itself... can I even store this object? What will I get back, if it even takes it? That's not an improvement.
IMHO storing anything but strings or small bits of data in the session is something that shouldn't be encouraged anyway.
Maybe there should be a way for the session to be interrogated before app startup, so that an extension can ask it what the serializer is and fail out if it doesn't provide certain guarantees. This way, in most cases deployers can be safer (and not possibly allow an RCE if their secret is leaked), yet if required they can switch to a different serializer to support their add-ons and the ecosystem around it.
I switched from JSON to Msgpack a while back to minimize the size of server-side sessions in Redis (Pickle is FAST for builtins and standard-lib types, but the payload consumes more bytes than desired when using an LRU cache memory store).
The standard msgpack and json libs have similar approaches and implementations to serialization, and I will underscore mmerickel's concern that compatibility is an issue.
The concern isn't just for custom data types which might be stored in a session, but also
1. not all builtins survive a decode(encode(payload)) roundtrip. For example, lists and tuples will both become a list.
2. none of the stdlib datatypes are handled, not just datetime; i.e., Decimal is out.
In terms of `loads`, dealing with the object_pairs_hook/object_hook is painfully slow as everything gets inspected. It becomes a significant performance hit.
If you were to implement something / require a restriction... it would make sense to have a default JSON serializer that can be overridden by the user, and/or support an API where 3rd party plugins can register (de)serialization formats for non-standard types. You really don't want to support serialization for any unused object types.
(FWIW, I found encoding datetime&date as epoch seconds was the fastest option for deserialization.)
FWIW Django switched the default serializer to JSON (but kept the ability to set a Pickle serializer) and afaik apps just dealt with it and it hasn't caused any problems.
FTR I don't have a problem making this switch. I think there are several cons regarding json's limited type support but I think overall it's a win.
FWIW It might also make sense to use msgpack and use custom types to represent common Python datatypes to allow them to still be serialized. This works a bit better in msgpack than it does in JSON because msgpack has type information in it's serialization format so you can register your own types (represented by an integer) and when it deserializes you get that integer + the data back out. That avoids needing to do things like guess about datetimes or what have you.
Something like:
``` python
import enum
import datetime
import struct
import msgpack
class Types(enum.IntEnum):
datetime = 1
def default(obj):
if isinstance(obj, datetime.datetime):
return msgpack.ExtType(
Types.datetime.value,
struct.pack(">I", int(obj.timestamp())),
)
raise TypeError("Unknown type: %r" % (obj,))
def ext_hook(code, data):
type_ = Types(code)
if type_ == Types.datetime:
return datetime.datetime.fromtimestamp(struct.unpack(">I", data)[0])
return msgpack.ExtType(code, data)
data = {"foobar": datetime.datetime.now()}
packed = msgpack.packb(data, default=default, use_bin_type=True)
unpacked = msgpack.unpackb(packed, ext_hook=ext_hook, encoding="utf-8")
# \/ This actually fails because milliseconds don't roundtrip via timestamp
# but you get the idea.
assert data == unpacked
```
Is `msgpack` in the stdlib? I'd prefer not to pull in another dependency. I'd be okay with shipping an optional session implementation that uses `msgpack`, but I don't think it's a great idea for core.
`msgpack` isn't in the stdlib and there are a few implementations.
FWIW, it's much faster and more compact than `json`. If you stick to the core Python data types, it outperforms Pickle's speed on serialization and deserialization by a lot. The hooks for encoding/decoding other types are rather slow though, and each additional non-standard element introduces a performance hit. IIRC, 1-2 datetime elements are faster than cpickle, 3+ are increasingly slower.
I am a -1 on setting the default to something that requires installing another dependency. I have no problem with making it easily switchable (including back to pickle).
I haven't really taken any time to evaluate msgpack but I will say that if it's a pure-python (or well-supported), trustable dependency then I'm not against it.
msgpack has a C accelerator but it falls back to pure Python when that can't compile (or on PyPy).
I love msgpack, it is awesome especially if you need to interface with C code, or if you need to dump shtloads of data. However I don't think it adds anything over JSON in this case, especially without speed-ups.
If serialization of complex object structures were needed, then I'd rather have https://pypi.python.org/pypi/cbor2. Supports cycles. Pure python.
FWIW, I am +1 on breaking bw compat in 2.X, moving to an interface that mandates that session data be JSON serializable. We could provide a PickleSession bw compat shim and instructions on how it would be used and point to these instructions when a deserialization or serialization error is raised within the default cookie serializer.
I like @mmerickel's initial idea of defaulting to JSON but being pluggable. This would allow people to switch to pickle/msgpack/etc or even specify a json encoder that can handle datetime.
While I understand his later concern over 'what happens if a 3rd party plugin needs to support X/Y/Z' -- that's what unittests are for, and the plugin can recommend encoder/decoders.
Yeah it sounds like Pyramid would define JSON data types as the target and people are free to use something more strict (like pickle). The downside for any addon / app that wanted richer types (like what pickle provided) would be that they may need to document more clearly the types of objects they store in the session and the session impl would then need to try to accommodate that but it'd be "not pyramid's problem".
@jvanasco what you describe is pretty much already the case, it's just that technically a session implementation currently *must* support the serialization of all pickleable objects to meet the API. In 2.0, it will be changed such that it *must* support only the serialization of all JSON-serializable objects, and specialized sessioning implementations can abide by less strict rules, as Michael said.
See #3353 for how I deprecated pickle-based sessions in pyramid 1.10 in order to provide users with some info about the upcoming change in 2.0. | 2018-11-03T19:18:00.000 | -1.0 | [
"tests/test_session.py::TestSignedCookieSession::test_reissue_not_triggered",
"tests/test_session.py::TestSignedCookieSession::test_reissue_str_triggered",
"tests/test_session.py::TestSignedCookieSession::test_custom_hashalg",
"tests/test_session.py::TestSignedCookieSession::test_reissue_triggered",
"tests/test_session.py::TestSignedCookieSession::test_ctor_with_cookie_still_valid",
"tests/test_session.py::TestSignedCookieSession::test_reissue_never",
"tests/test_session.py::TestSignedCookieSession::test_custom_salt",
"tests/test_session.py::TestSignedCookieSession::test_timeout_never"
] | [
"tests/test_session.py::TestBaseCookieSession::test__set_cookie_real_webob_response",
"tests/test_session.py::TestBaseCookieSession::test_peek_flash_default_queue",
"tests/test_session.py::TestSignedCookieSession::test_custom_serializer",
"tests/test_session.py::TestSignedCookieSession::test_timeout_invalid",
"tests/test_session.py::TestSignedCookieSession::test_flash_allow_duplicate_true_and_msg_not_in_storage",
"tests/test_session.py::Test_manage_accessed::test_accessed_set",
"tests/test_session.py::TestSignedCookieSession::test_pop_flash_default_queue",
"tests/test_session.py::TestSignedCookieSession::test_invalidate",
"tests/test_session.py::TestBaseCookieSession::test_reissue_triggered",
"tests/test_session.py::TestSignedCookieSession::test_cookie_max_age_invalid",
"tests/test_session.py::TestBaseCookieSession::test_reissue_str_triggered",
"tests/test_session.py::TestSignedCookieSession::test_secret_mismatch",
"tests/test_session.py::TestSignedCookieSession::test_instance_conforms",
"tests/test_session.py::TestSignedCookieSession::test_get_csrf_token_new",
"tests/test_session.py::TestPickleSerializer::test_loads_raises_ValueError_on_invalid_data",
"tests/test_session.py::TestPickleSerializer::test_loads",
"tests/test_session.py::TestSignedCookieSession::test_flash_allow_duplicate_false",
"tests/test_session.py::TestSignedCookieSession::test_ctor_no_cookie",
"tests/test_session.py::TestSignedCookieSession::test__set_cookie_on_exception",
"tests/test_session.py::TestBaseCookieSession::test_flash_allow_duplicate_false",
"tests/test_session.py::TestBaseCookieSession::test_cookie_max_age_invalid",
"tests/test_session.py::TestSignedCookieSession::test_get_csrf_token",
"tests/test_session.py::TestBaseCookieSession::test_invalidate",
"tests/test_session.py::TestBaseCookieSession::test_flash_allow_duplicate_false_and_msg_not_in_storage",
"tests/test_session.py::TestBaseCookieSession::test_new_csrf_token",
"tests/test_session.py::TestSignedCookieSession::test__set_cookie_real_webob_response",
"tests/test_session.py::TestSignedCookieSession::test_invalid_data_size",
"tests/test_session.py::TestBaseCookieSession::test_cookie_is_set",
"tests/test_session.py::TestBaseCookieSession::test_flash_allow_duplicate_true_and_msg_not_in_storage",
"tests/test_session.py::TestBaseCookieSession::test_timeout_never",
"tests/test_session.py::TestBaseCookieSession::test_ctor_with_bad_cookie_not_tuple",
"tests/test_session.py::TestBaseCookieSession::test__set_cookie_on_exception",
"tests/test_session.py::TestSignedCookieSession::test_ctor_with_bad_cookie_not_tuple",
"tests/test_session.py::TestSignedCookieSession::test_salt_mismatch",
"tests/test_session.py::TestSignedCookieSession::test_cookie_is_set",
"tests/test_session.py::TestBaseCookieSession::test_flash_mixed",
"tests/test_session.py::TestSignedCookieSession::test_peek_flash_default_queue",
"tests/test_session.py::TestBaseCookieSession::test_reissue_invalid",
"tests/test_session.py::TestSignedCookieSession::test_hashalg_mismatch",
"tests/test_session.py::TestBaseCookieSession::test_changed",
"tests/test_session.py::TestSignedCookieSession::test_timeout_str",
"tests/test_session.py::TestBaseCookieSession::test_get_csrf_token_new",
"tests/test_session.py::TestSignedCookieSession::test_flash_mixed",
"tests/test_session.py::TestSignedCookieSession::test_ctor_with_cookie_expired",
"tests/test_session.py::TestBaseCookieSession::test_flash_default",
"tests/test_session.py::TestBaseCookieSession::test__set_cookie_on_exception_no_request_exception",
"tests/test_session.py::TestSignedCookieSession::test__set_cookie_cookieval_too_long",
"tests/test_session.py::TestSignedCookieSession::test_very_long_key",
"tests/test_session.py::TestBaseCookieSession::test_timeout_invalid",
"tests/test_session.py::TestBaseCookieSession::test__set_cookie_cookieval_too_long",
"tests/test_session.py::TestSignedCookieSession::test_new_csrf_token",
"tests/test_session.py::TestBaseCookieSession::test_timeout_str",
"tests/test_session.py::TestBaseCookieSession::test_instance_conforms",
"tests/test_session.py::TestSignedCookieSession::test_reissue_invalid",
"tests/test_session.py::TestSignedCookieSession::test_pop_flash_nodefault_queue",
"tests/test_session.py::TestSignedCookieSession::test_flash_allow_duplicate_false_and_msg_not_in_storage",
"tests/test_session.py::TestPickleSerializer::test_dumps",
"tests/test_session.py::TestPickleSerializer::test_loads_raises_ValueError_on_bad_import",
"tests/test_session.py::TestBaseCookieSession::test_reissue_never",
"tests/test_session.py::TestSignedCookieSession::test__set_cookie_on_exception_no_request_exception",
"tests/test_session.py::TestBaseCookieSession::test_get_csrf_token",
"tests/test_session.py::TestBaseCookieSession::test_ctor_with_cookie_still_valid",
"tests/test_session.py::TestBaseCookieSession::test_ctor_no_cookie",
"tests/test_session.py::TestBaseCookieSession::test_pop_flash_default_queue",
"tests/test_session.py::TestBaseCookieSession::test_peek_flash_nodefault_queue",
"tests/test_session.py::TestSignedCookieSession::test_timeout",
"tests/test_session.py::TestSignedCookieSession::test_set_cookie_with_exception",
"tests/test_session.py::TestSignedCookieSession::test_no_set_cookie_with_exception",
"tests/test_session.py::TestSignedCookieSession::test_peek_flash_nodefault_queue",
"tests/test_session.py::Test_manage_accessed::test_already_dirty",
"tests/test_session.py::TestBaseCookieSession::test__set_cookie_options",
"tests/test_session.py::TestBaseCookieSession::test_ctor_with_bad_cookie_cannot_deserialize",
"tests/test_session.py::TestBaseCookieSession::test_ctor_with_cookie_expired",
"tests/test_session.py::TestBaseCookieSession::test_no_set_cookie_with_exception",
"tests/test_session.py::TestBaseCookieSession::test_pop_flash_nodefault_queue",
"tests/test_session.py::TestSignedCookieSession::test__set_cookie_options",
"tests/test_session.py::TestBaseCookieSession::test_set_cookie_with_exception",
"tests/test_session.py::Test_manage_changed::test_it",
"tests/test_session.py::TestBaseCookieSession::test_reissue_not_triggered",
"tests/test_session.py::TestSignedCookieSession::test_flash_default",
"tests/test_session.py::TestSignedCookieSession::test_ctor_with_bad_cookie_cannot_deserialize",
"tests/test_session.py::Test_manage_accessed::test_accessed_without_renew",
"tests/test_session.py::TestSignedCookieSession::test_changed",
"tests/test_session.py::TestBaseCookieSession::test_timeout"
] | 3413 | "['2709']" |