problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_19225 | rasdani/github-patches | git_diff | docker__docker-py-971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tlsv1 alert protocol version on 1.7.1 and 1.7.2 but not on 1.7.0
Similar to #949 I'm discovering issues with latest versions of `docker-py` running against docker 1.10.2 instance. I'm using `docker.utils.kwargs_from_env(assert_hostname=False)`. Things work fine with version 1.7.0.
Docker client is initialized via
```
client = docker.Client(
version='auto',
**docker.utils.kwargs_from_env(assert_hostname=False))
```
with docker environment variables being set to the following (via docker-machine)
```
DOCKER_HOST=tcp://192.168.156.137:2376
DOCKER_MACHINE_NAME=dev2
DOCKER_TLS_VERIFY=1
DOCKER_CERT_PATH=/Users/benjixx/.docker/machine/machines/dev2
```
docker-py 1.7.1 and 1.7.2 now raise the following exception:
```
DockerException: Error while fetching server API version: [Errno 1] _ssl.c:507: error:1407742E:SSL routines:SSL23_GET_SERVER_HELLO:tlsv1 alert protocol version
```
Any idea what's happening here?
</issue>
<code>
[start of docker/tls.py]
1 import os
2
3 from . import errors
4 from .ssladapter import ssladapter
5
6
7 class TLSConfig(object):
8 cert = None
9 ca_cert = None
10 verify = None
11 ssl_version = None
12
13 def __init__(self, client_cert=None, ca_cert=None, verify=None,
14 ssl_version=None, assert_hostname=None,
15 assert_fingerprint=None):
16 # Argument compatibility/mapping with
17 # https://docs.docker.com/engine/articles/https/
18 # This diverges from the Docker CLI in that users can specify 'tls'
19 # here, but also disable any public/default CA pool verification by
20 # leaving tls_verify=False
21
22 self.ssl_version = ssl_version
23 self.assert_hostname = assert_hostname
24 self.assert_fingerprint = assert_fingerprint
25
26 # "tls" and "tls_verify" must have both or neither cert/key files
27 # In either case, Alert the user when both are expected, but any are
28 # missing.
29
30 if client_cert:
31 try:
32 tls_cert, tls_key = client_cert
33 except ValueError:
34 raise errors.TLSParameterError(
35 'client_config must be a tuple of'
36 ' (client certificate, key file)'
37 )
38
39 if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
40 not os.path.isfile(tls_key)):
41 raise errors.TLSParameterError(
42 'Path to a certificate and key files must be provided'
43 ' through the client_config param'
44 )
45 self.cert = (tls_cert, tls_key)
46
47 # If verify is set, make sure the cert exists
48 self.verify = verify
49 self.ca_cert = ca_cert
50 if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
51 raise errors.TLSParameterError(
52 'Invalid CA certificate provided for `tls_ca_cert`.'
53 )
54
55 def configure_client(self, client):
56 client.ssl_version = self.ssl_version
57
58 if self.verify and self.ca_cert:
59 client.verify = self.ca_cert
60 else:
61 client.verify = self.verify
62
63 if self.cert:
64 client.cert = self.cert
65
66 client.mount('https://', ssladapter.SSLAdapter(
67 ssl_version=self.ssl_version,
68 assert_hostname=self.assert_hostname,
69 assert_fingerprint=self.assert_fingerprint,
70 ))
71
[end of docker/tls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/tls.py b/docker/tls.py
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -1,4 +1,5 @@
import os
+import ssl
from . import errors
from .ssladapter import ssladapter
@@ -19,10 +20,14 @@
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
- self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
+ # TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious
+ # ways: https://github.com/docker/docker-py/issues/963
+
+ self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1
+
# "tls" and "tls_verify" must have both or neither cert/key files
# In either case, Alert the user when both are expected, but any are
# missing.
| {"golden_diff": "diff --git a/docker/tls.py b/docker/tls.py\n--- a/docker/tls.py\n+++ b/docker/tls.py\n@@ -1,4 +1,5 @@\n import os\n+import ssl\n \n from . import errors\n from .ssladapter import ssladapter\n@@ -19,10 +20,14 @@\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n \n- self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n \n+ # TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious\n+ # ways: https://github.com/docker/docker-py/issues/963\n+\n+ self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1\n+\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n", "issue": "tlsv1 alert protocol version on 1.7.1 and 1.7.2 but not on 1.7.0\nSimilar to #949 I'm discovering issues with latest versions of `docker-py` running against docker 1.10.2 instance. I'm using `docker.utils.kwargs_from_env(assert_hostname=False)`. Things work fine with version 1.7.0.\n\nDocker client is initialized via\n\n```\nclient = docker.Client(\n version='auto',\n **docker.utils.kwargs_from_env(assert_hostname=False))\n```\n\nwith docker environment variables being set to the following (via docker-machine)\n\n```\nDOCKER_HOST=tcp://192.168.156.137:2376\nDOCKER_MACHINE_NAME=dev2\nDOCKER_TLS_VERIFY=1\nDOCKER_CERT_PATH=/Users/benjixx/.docker/machine/machines/dev2\n```\n\ndocker-py 1.7.1 and 1.7.2 now raise the following exception:\n\n```\nDockerException: Error while fetching server API version: [Errno 1] _ssl.c:507: error:1407742E:SSL routines:SSL23_GET_SERVER_HELLO:tlsv1 alert protocol version\n```\n\nAny idea what's happening here?\n\n", "before_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n ca_cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None, assert_hostname=None,\n assert_fingerprint=None):\n # Argument compatibility/mapping with\n # https://docs.docker.com/engine/articles/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # If verify is set, make sure the cert exists\n self.verify = verify\n self.ca_cert = ca_cert\n if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n\n if self.verify and self.ca_cert:\n client.verify = self.ca_cert\n else:\n client.verify = self.verify\n\n if self.cert:\n client.cert = self.cert\n\n client.mount('https://', ssladapter.SSLAdapter(\n ssl_version=self.ssl_version,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n ))\n", "path": "docker/tls.py"}]} | 1,449 | 223 |
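The golden diff in the record above makes `TLSConfig` fall back to `ssl.PROTOCOL_TLSv1` when the caller passes no `ssl_version`. A minimal caller-side sketch of the same idea, useful while still running an unpatched 1.7.x client, is below; the certificate paths and host URL are placeholders, not values taken from the record.

```python
import ssl
import docker

# Sketch only: pin TLS 1.0 explicitly instead of relying on the library default
# that the diff above changes. Paths and the host URL are placeholders.
tls_config = docker.tls.TLSConfig(
    client_cert=('/path/to/cert.pem', '/path/to/key.pem'),
    ca_cert='/path/to/ca.pem',
    verify=True,
    ssl_version=ssl.PROTOCOL_TLSv1,   # the value the patched default falls back to
    assert_hostname=False,
)
client = docker.Client(base_url='tcp://192.168.99.100:2376',
                       version='auto', tls=tls_config)
```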
gh_patches_debug_36965 | rasdani/github-patches | git_diff | mne-tools__mne-bids-750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
STY: Move to pydata-sphinx-theme
Now that NumPy, SciPy (in progress), pandas, and (most importantly I think) MNE-Python have moved to pydata-sphinx-theme, it might be nice to move `mne-bids` over, too, for cohesiveness. Any thoughts or objections?
</issue>
<code>
[start of doc/conf.py]
1 """Configure details for documentation with sphinx."""
2 import os
3 import sys
4 from datetime import date
5
6 import sphinx_gallery # noqa: F401
7 from sphinx_gallery.sorting import ExampleTitleSortKey
8 import sphinx_bootstrap_theme
9
10 import mne_bids
11
12
13 # If extensions (or modules to document with autodoc) are in another directory,
14 # add these directories to sys.path here. If the directory is relative to the
15 # documentation root, use os.path.abspath to make it absolute, like shown here.
16 curdir = os.path.dirname(__file__)
17 sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))
18 sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
19
20
21 # -- General configuration ------------------------------------------------
22
23 # If your documentation needs a minimal Sphinx version, state it here.
24 #
25 # needs_sphinx = '1.0'
26
27 # Add any Sphinx extension module names here, as strings. They can be
28 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
29 # ones.
30 extensions = [
31 'sphinx.ext.githubpages',
32 'sphinx.ext.autodoc',
33 'sphinx.ext.mathjax',
34 'sphinx.ext.viewcode',
35 'sphinx.ext.autosummary',
36 'sphinx.ext.doctest',
37 'sphinx.ext.intersphinx',
38 'sphinx_gallery.gen_gallery',
39 'numpydoc',
40 'sphinx_copybutton',
41 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py
42 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py
43 ]
44
45 # configure sphinx-copybutton
46 copybutton_prompt_text = r">>> |\.\.\. |\$ "
47 copybutton_prompt_is_regexp = True
48
49 # configure numpydoc
50 numpydoc_xref_param_type = True
51 numpydoc_class_members_toctree = False
52 numpydoc_attributes_as_param_list = True
53 numpydoc_xref_aliases = {
54 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',
55 }
56 numpydoc_xref_ignore = {
57 # words
58 'of',
59 }
60
61
62 # generate autosummary even if no references
63 autosummary_generate = True
64 autodoc_default_options = {'inherited-members': None}
65 default_role = 'autolink' # XXX silently allows bad syntax, someone should fix
66
67 # The suffix(es) of source filenames.
68 # You can specify multiple suffix as a list of string:
69 #
70 # source_suffix = ['.rst', '.md']
71 source_suffix = '.rst'
72
73 # The master toctree document.
74 master_doc = 'index'
75
76 # General information about the project.
77 project = u'mne_bids'
78 td = date.today()
79 copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,
80 td.isoformat())
81
82 author = u'MNE Developers'
83
84 # The version info for the project you're documenting, acts as replacement for
85 # |version| and |release|, also used in various other places throughout the
86 # built documents.
87 #
88 # The short X.Y version.
89 version = mne_bids.__version__
90 # The full version, including alpha/beta/rc tags.
91 release = version
92
93 # List of patterns, relative to source directory, that match files and
94 # directories to ignore when looking for source files.
95 # This patterns also effect to html_static_path and html_extra_path
96 exclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',
97 '.DS_Store']
98
99 # HTML options (e.g., theme)
100 # see: https://sphinx-bootstrap-theme.readthedocs.io/en/latest/README.html
101 # Clean up sidebar: Do not show "Source" link
102 html_show_sourcelink = False
103
104 html_theme = 'bootstrap'
105 html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
106
107 # Add any paths that contain templates here, relative to this directory.
108 templates_path = ['_templates']
109 html_static_path = ['_static']
110 html_css_files = ['style.css']
111
112 # Theme options are theme-specific and customize the look and feel of a theme
113 # further. For a list of options available for each theme, see the
114 # documentation.
115 html_theme_options = {
116 'navbar_title': 'MNE-BIDS',
117 'bootswatch_theme': "flatly",
118 'navbar_sidebarrel': False, # no "previous / next" navigation
119 'navbar_pagenav': False, # no "Page" navigation in sidebar
120 'bootstrap_version': "3",
121 'navbar_links': [
122 ("News", "whats_new"),
123 ("Install", "install"),
124 ("Use", "use"),
125 ("API", "api"),
126 ("CLI", "generated/cli"),
127 ("Contribute!", "contribute")
128 ]}
129
130 html_sidebars = {'**': ['localtoc.html']}
131
132 # Example configuration for intersphinx: refer to the Python standard library.
133 intersphinx_mapping = {
134 'python': ('https://docs.python.org/3', None),
135 'mne': ('https://mne.tools/dev', None),
136 'numpy': ('https://numpy.org/devdocs', None),
137 'scipy': ('https://scipy.github.io/devdocs', None),
138 'matplotlib': ('https://matplotlib.org', None),
139 'nilearn': ('https://nilearn.github.io', None),
140 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),
141 'nibabel': ('https://nipy.org/nibabel', None),
142 }
143 intersphinx_timeout = 5
144
145 # Resolve binder filepath_prefix. From the docs:
146 # "A prefix to append to the filepath in the Binder links. You should use this
147 # if you will store your built documentation in a sub-folder of a repository,
148 # instead of in the root."
149 # we will store dev docs in a `dev` subdirectory and all other docs in a
150 # directory "v" + version_str. E.g., "v0.3"
151 if 'dev' in version:
152 filepath_prefix = 'dev'
153 else:
154 filepath_prefix = 'v{}'.format(version)
155
156 sphinx_gallery_conf = {
157 'doc_module': 'mne_bids',
158 'reference_url': {
159 'mne_bids': None,
160 },
161 'backreferences_dir': 'generated',
162 'examples_dirs': '../examples',
163 'within_subsection_order': ExampleTitleSortKey,
164 'gallery_dirs': 'auto_examples',
165 'filename_pattern': '^((?!sgskip).)*$',
166 'binder': {
167 # Required keys
168 'org': 'mne-tools',
169 'repo': 'mne-bids',
170 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
171 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).
172 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.
173 'dependencies': [
174 '../test_requirements.txt',
175 './requirements.txt',
176 ],
177 }
178 }
179
[end of doc/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -5,7 +5,6 @@
import sphinx_gallery # noqa: F401
from sphinx_gallery.sorting import ExampleTitleSortKey
-import sphinx_bootstrap_theme
import mne_bids
@@ -74,7 +73,7 @@
master_doc = 'index'
# General information about the project.
-project = u'mne_bids'
+project = u'MNE-BIDS'
td = date.today()
copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,
td.isoformat())
@@ -97,12 +96,10 @@
'.DS_Store']
# HTML options (e.g., theme)
-# see: https://sphinx-bootstrap-theme.readthedocs.io/en/latest/README.html
-# Clean up sidebar: Do not show "Source" link
html_show_sourcelink = False
+html_copy_source = False
-html_theme = 'bootstrap'
-html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
+html_theme = 'pydata_sphinx_theme'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -113,21 +110,31 @@
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
- 'navbar_title': 'MNE-BIDS',
- 'bootswatch_theme': "flatly",
- 'navbar_sidebarrel': False, # no "previous / next" navigation
- 'navbar_pagenav': False, # no "Page" navigation in sidebar
- 'bootstrap_version': "3",
- 'navbar_links': [
- ("News", "whats_new"),
- ("Install", "install"),
- ("Use", "use"),
- ("API", "api"),
- ("CLI", "generated/cli"),
- ("Contribute!", "contribute")
- ]}
-
-html_sidebars = {'**': ['localtoc.html']}
+ 'icon_links': [
+ dict(name='GitHub',
+ url='https://github.com/mne-tools/mne-bids',
+ icon='fab fa-github-square'),
+ ],
+ 'icon_links_label': 'Quick Links', # for screen reader
+ 'use_edit_page_button': False,
+ 'navigation_with_keys': False,
+ 'show_toc_level': 1,
+}
+
+html_context = {
+ 'versions_dropdown': {
+ 'dev': 'v0.8 (devel)',
+ 'stable': 'v0.7 (stable)',
+ 'v0.6': 'v0.6',
+ 'v0.5': 'v0.5',
+ 'v0.4': 'v0.4',
+ 'v0.3': 'v0.3',
+ 'v0.2': 'v0.2',
+ 'v0.1': 'v0.1',
+ },
+}
+
+html_sidebars = {}
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -5,7 +5,6 @@\n \n import sphinx_gallery # noqa: F401\n from sphinx_gallery.sorting import ExampleTitleSortKey\n-import sphinx_bootstrap_theme\n \n import mne_bids\n \n@@ -74,7 +73,7 @@\n master_doc = 'index'\n \n # General information about the project.\n-project = u'mne_bids'\n+project = u'MNE-BIDS'\n td = date.today()\n copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n@@ -97,12 +96,10 @@\n '.DS_Store']\n \n # HTML options (e.g., theme)\n-# see: https://sphinx-bootstrap-theme.readthedocs.io/en/latest/README.html\n-# Clean up sidebar: Do not show \"Source\" link\n html_show_sourcelink = False\n+html_copy_source = False\n \n-html_theme = 'bootstrap'\n-html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n+html_theme = 'pydata_sphinx_theme'\n \n # Add any paths that contain templates here, relative to this directory.\n templates_path = ['_templates']\n@@ -113,21 +110,31 @@\n # further. For a list of options available for each theme, see the\n # documentation.\n html_theme_options = {\n- 'navbar_title': 'MNE-BIDS',\n- 'bootswatch_theme': \"flatly\",\n- 'navbar_sidebarrel': False, # no \"previous / next\" navigation\n- 'navbar_pagenav': False, # no \"Page\" navigation in sidebar\n- 'bootstrap_version': \"3\",\n- 'navbar_links': [\n- (\"News\", \"whats_new\"),\n- (\"Install\", \"install\"),\n- (\"Use\", \"use\"),\n- (\"API\", \"api\"),\n- (\"CLI\", \"generated/cli\"),\n- (\"Contribute!\", \"contribute\")\n- ]}\n-\n-html_sidebars = {'**': ['localtoc.html']}\n+ 'icon_links': [\n+ dict(name='GitHub',\n+ url='https://github.com/mne-tools/mne-bids',\n+ icon='fab fa-github-square'),\n+ ],\n+ 'icon_links_label': 'Quick Links', # for screen reader\n+ 'use_edit_page_button': False,\n+ 'navigation_with_keys': False,\n+ 'show_toc_level': 1,\n+}\n+\n+html_context = {\n+ 'versions_dropdown': {\n+ 'dev': 'v0.8 (devel)',\n+ 'stable': 'v0.7 (stable)',\n+ 'v0.6': 'v0.6',\n+ 'v0.5': 'v0.5',\n+ 'v0.4': 'v0.4',\n+ 'v0.3': 'v0.3',\n+ 'v0.2': 'v0.2',\n+ 'v0.1': 'v0.1',\n+ },\n+}\n+\n+html_sidebars = {}\n \n # Example configuration for intersphinx: refer to the Python standard library.\n intersphinx_mapping = {\n", "issue": "STY: Move to pydata-sphinx-theme\nNow that NumPy, SciPy (in progress), pandas, and (most importantly I think) MNE-Python have moved to pydata-sphinx-theme, it might be nice to move `mne-bids` over, too, for cohesiveness. Any thoughts or objections?\n", "before_files": [{"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\nimport sphinx_bootstrap_theme\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. |\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'of',\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'mne_bids'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\n# see: https://sphinx-bootstrap-theme.readthedocs.io/en/latest/README.html\n# Clean up sidebar: Do not show \"Source\" link\nhtml_show_sourcelink = False\n\nhtml_theme = 'bootstrap'\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'navbar_title': 'MNE-BIDS',\n 'bootswatch_theme': \"flatly\",\n 'navbar_sidebarrel': False, # no \"previous / next\" navigation\n 'navbar_pagenav': False, # no \"Page\" navigation in sidebar\n 'bootstrap_version': \"3\",\n 'navbar_links': [\n (\"News\", \"whats_new\"),\n (\"Install\", \"install\"),\n (\"Use\", \"use\"),\n (\"API\", \"api\"),\n (\"CLI\", \"generated/cli\"),\n (\"Contribute!\", \"contribute\")\n ]}\n\nhtml_sidebars = {'**': ['localtoc.html']}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('https://nilearn.github.io', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}]} | 2,599 | 719 |
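The diff in the record above boils down to dropping `sphinx_bootstrap_theme` and pointing Sphinx at the pip-installed `pydata_sphinx_theme` package, which needs no `html_theme_path`. A condensed sketch of that `doc/conf.py` change follows; options other than those shown in the diff are omitted, not invented.

```python
# Condensed sketch of the conf.py change made by the diff above.
# pydata-sphinx-theme is importable once installed, so no theme path is set.
html_theme = 'pydata_sphinx_theme'

html_theme_options = {
    'icon_links': [
        dict(name='GitHub',
             url='https://github.com/mne-tools/mne-bids',
             icon='fab fa-github-square'),
    ],
    'use_edit_page_button': False,
    'show_toc_level': 1,
}
```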
gh_patches_debug_12849 | rasdani/github-patches | git_diff | OCA__server-tools-37 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] mass_editing - Search in ir.model.fields not working
I setup the mass_editing for res.partner then i go to:
Settings -> Technical -> Database Structure -> Fields
and go to the Filter with ('model_id','=',61) where 61 is the id of res.partner in res.model. By typ "res.partner" and select "Partner" from the suggestion.
Then i get the following error:
File "[..]/mass_editing/mass_editing.py", line 34, in search
model_domain += [('model_id', 'in', map(int, domain[2][1:-1].split(',')))]
TypeError: 'int' object has no attribute '__getitem__'
</issue>
<code>
[start of mass_editing/mass_editing.py]
1 # -*- coding: utf-8 -*-
2 ##############################################################################
3 #
4 # This module uses OpenERP, Open Source Management Solution Framework.
5 # Copyright (C):
6 # 2012-Today Serpent Consulting Services (<http://www.serpentcs.com>)
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>
20 #
21 ##############################################################################
22
23 from openerp.osv import orm, fields, osv
24 from openerp.tools.translate import _
25
26
27 class ir_model_fields(orm.Model):
28 _inherit = 'ir.model.fields'
29
30 def search(
31 self, cr, uid, args, offset=0, limit=0, order=None, context=None,
32 count=False):
33 model_domain = []
34 for domain in args:
35 if domain[0] == 'model_id' and domain[2]\
36 and type(domain[2]) != list:
37 model_domain += [(
38 'model_id', 'in', map(int, domain[2][1:-1].split(',')))]
39 else:
40 model_domain.append(domain)
41 return super(ir_model_fields, self).search(
42 cr, uid, model_domain, offset=offset, limit=limit, order=order,
43 context=context, count=count)
44
45 ir_model_fields()
46
47
48 class mass_object(orm.Model):
49 _name = "mass.object"
50
51 _columns = {
52 'name': fields.char("Name", size=64, required=True, select=1),
53 'model_id': fields.many2one(
54 'ir.model', 'Model', required=True, select=1),
55 'field_ids': fields.many2many(
56 'ir.model.fields', 'mass_field_rel', 'mass_id', 'field_id',
57 'Fields'),
58 'ref_ir_act_window': fields.many2one(
59 'ir.actions.act_window', 'Sidebar Action', readonly=True,
60 help="Sidebar action to make this template available on records \
61 of the related document model"),
62 'ref_ir_value': fields.many2one(
63 'ir.values', 'Sidebar Button', readonly=True,
64 help="Sidebar button to open the sidebar action"),
65 'model_ids': fields.many2many('ir.model', string='Model List')
66 }
67
68 _sql_constraints = [
69 ('name_uniq', 'unique (name)', _('Name must be unique!')),
70 ]
71
72 def onchange_model(self, cr, uid, ids, model_id, context=None):
73 if context is None:
74 context = {}
75 if not model_id:
76 return {'value': {'model_ids': [(6, 0, [])]}}
77 model_ids = [model_id]
78 model_obj = self.pool.get('ir.model')
79 active_model_obj = self.pool.get(model_obj.browse(
80 cr, uid, model_id).model)
81 if active_model_obj._inherits:
82 for key, val in active_model_obj._inherits.items():
83 found_model_ids = model_obj.search(
84 cr, uid, [('model', '=', key)], context=context)
85 model_ids += found_model_ids
86 return {'value': {'model_ids': [(6, 0, model_ids)]}}
87
88 def create_action(self, cr, uid, ids, context=None):
89 vals = {}
90 action_obj = self.pool.get('ir.actions.act_window')
91 ir_values_obj = self.pool.get('ir.values')
92 for data in self.browse(cr, uid, ids, context=context):
93 src_obj = data.model_id.model
94 button_name = _('Mass Editing (%s)') % data.name
95 vals['ref_ir_act_window'] = action_obj.create(cr, uid, {
96 'name': button_name,
97 'type': 'ir.actions.act_window',
98 'res_model': 'mass.editing.wizard',
99 'src_model': src_obj,
100 'view_type': 'form',
101 'context': "{'mass_editing_object' : %d}" % (data.id),
102 'view_mode': 'form,tree',
103 'target': 'new',
104 'auto_refresh': 1,
105 }, context)
106 vals['ref_ir_value'] = ir_values_obj.create(cr, uid, {
107 'name': button_name,
108 'model': src_obj,
109 'key2': 'client_action_multi',
110 'value': (
111 "ir.actions.act_window,"
112 + str(vals['ref_ir_act_window'])),
113 'object': True,
114 }, context)
115 self.write(cr, uid, ids, {
116 'ref_ir_act_window': vals.get('ref_ir_act_window', False),
117 'ref_ir_value': vals.get('ref_ir_value', False),
118 }, context)
119 return True
120
121 def unlink_action(self, cr, uid, ids, context=None):
122 for template in self.browse(cr, uid, ids, context=context):
123 try:
124 if template.ref_ir_act_window:
125 self.pool.get('ir.actions.act_window').unlink(
126 cr, uid, template.ref_ir_act_window.id, context)
127 if template.ref_ir_value:
128 ir_values_obj = self.pool.get('ir.values')
129 ir_values_obj.unlink(
130 cr, uid, template.ref_ir_value.id, context)
131 except:
132 raise osv.except_osv(
133 _("Warning"),
134 _("Deletion of the action record failed."))
135 return True
136
137 def unlink(self, cr, uid, ids, context=None):
138 self.unlink_action(cr, uid, ids, context)
139 return super(mass_object, self).unlink(cr, uid, ids, context)
140
141 def copy(self, cr, uid, record_id, default=None, context=None):
142 if default is None:
143 default = {}
144 default.update({'name': '', 'field_ids': []})
145 return super(mass_object, self).copy(
146 cr, uid, record_id, default, context)
147
148 mass_object()
149 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
150
[end of mass_editing/mass_editing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mass_editing/mass_editing.py b/mass_editing/mass_editing.py
--- a/mass_editing/mass_editing.py
+++ b/mass_editing/mass_editing.py
@@ -32,10 +32,11 @@
count=False):
model_domain = []
for domain in args:
- if domain[0] == 'model_id' and domain[2]\
- and type(domain[2]) != list:
- model_domain += [(
- 'model_id', 'in', map(int, domain[2][1:-1].split(',')))]
+ if (len(domain) > 2 and domain[0] == 'model_id'
+ and isinstance(domain[2], basestring)):
+ model_domain += [
+ ('model_id', 'in', map(int, domain[2][1:-1].split(',')))
+ ]
else:
model_domain.append(domain)
return super(ir_model_fields, self).search(
| {"golden_diff": "diff --git a/mass_editing/mass_editing.py b/mass_editing/mass_editing.py\n--- a/mass_editing/mass_editing.py\n+++ b/mass_editing/mass_editing.py\n@@ -32,10 +32,11 @@\n count=False):\n model_domain = []\n for domain in args:\n- if domain[0] == 'model_id' and domain[2]\\\n- and type(domain[2]) != list:\n- model_domain += [(\n- 'model_id', 'in', map(int, domain[2][1:-1].split(',')))]\n+ if (len(domain) > 2 and domain[0] == 'model_id'\n+ and isinstance(domain[2], basestring)):\n+ model_domain += [\n+ ('model_id', 'in', map(int, domain[2][1:-1].split(',')))\n+ ]\n else:\n model_domain.append(domain)\n return super(ir_model_fields, self).search(\n", "issue": "[Bug] mass_editing - Search in ir.model.fields not working\nI setup the mass_editing for res.partner then i go to:\nSettings -> Technical -> Database Structure -> Fields\nand go to the Filter with ('model_id','=',61) where 61 is the id of res.partner in res.model. By typ \"res.partner\" and select \"Partner\" from the suggestion.\n\nThen i get the following error:\nFile \"[..]/mass_editing/mass_editing.py\", line 34, in search\n model_domain += [('model_id', 'in', map(int, domain[2][1:-1].split(',')))]\nTypeError: 'int' object has no attribute '**getitem**'\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# This module uses OpenERP, Open Source Management Solution Framework.\n# Copyright (C):\n# 2012-Today Serpent Consulting Services (<http://www.serpentcs.com>)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>\n#\n##############################################################################\n\nfrom openerp.osv import orm, fields, osv\nfrom openerp.tools.translate import _\n\n\nclass ir_model_fields(orm.Model):\n _inherit = 'ir.model.fields'\n\n def search(\n self, cr, uid, args, offset=0, limit=0, order=None, context=None,\n count=False):\n model_domain = []\n for domain in args:\n if domain[0] == 'model_id' and domain[2]\\\n and type(domain[2]) != list:\n model_domain += [(\n 'model_id', 'in', map(int, domain[2][1:-1].split(',')))]\n else:\n model_domain.append(domain)\n return super(ir_model_fields, self).search(\n cr, uid, model_domain, offset=offset, limit=limit, order=order,\n context=context, count=count)\n\nir_model_fields()\n\n\nclass mass_object(orm.Model):\n _name = \"mass.object\"\n\n _columns = {\n 'name': fields.char(\"Name\", size=64, required=True, select=1),\n 'model_id': fields.many2one(\n 'ir.model', 'Model', required=True, select=1),\n 'field_ids': fields.many2many(\n 'ir.model.fields', 'mass_field_rel', 'mass_id', 'field_id',\n 'Fields'),\n 'ref_ir_act_window': fields.many2one(\n 'ir.actions.act_window', 'Sidebar Action', readonly=True,\n help=\"Sidebar action to make this template available on records \\\n of the related document model\"),\n 'ref_ir_value': fields.many2one(\n 'ir.values', 'Sidebar Button', readonly=True,\n help=\"Sidebar button to open the sidebar action\"),\n 'model_ids': fields.many2many('ir.model', string='Model List')\n }\n\n _sql_constraints = [\n ('name_uniq', 'unique (name)', _('Name must be unique!')),\n ]\n\n def onchange_model(self, cr, uid, ids, model_id, context=None):\n if context is None:\n context = {}\n if not model_id:\n return {'value': {'model_ids': [(6, 0, [])]}}\n model_ids = [model_id]\n model_obj = self.pool.get('ir.model')\n active_model_obj = self.pool.get(model_obj.browse(\n cr, uid, model_id).model)\n if active_model_obj._inherits:\n for key, val in active_model_obj._inherits.items():\n found_model_ids = model_obj.search(\n cr, uid, [('model', '=', key)], context=context)\n model_ids += found_model_ids\n return {'value': {'model_ids': [(6, 0, model_ids)]}}\n\n def create_action(self, cr, uid, ids, context=None):\n vals = {}\n action_obj = self.pool.get('ir.actions.act_window')\n ir_values_obj = self.pool.get('ir.values')\n for data in self.browse(cr, uid, ids, context=context):\n src_obj = data.model_id.model\n button_name = _('Mass Editing (%s)') % data.name\n vals['ref_ir_act_window'] = action_obj.create(cr, uid, {\n 'name': button_name,\n 'type': 'ir.actions.act_window',\n 'res_model': 'mass.editing.wizard',\n 'src_model': src_obj,\n 'view_type': 'form',\n 'context': \"{'mass_editing_object' : %d}\" % (data.id),\n 'view_mode': 'form,tree',\n 'target': 'new',\n 'auto_refresh': 1,\n }, context)\n vals['ref_ir_value'] = ir_values_obj.create(cr, uid, {\n 'name': button_name,\n 'model': src_obj,\n 'key2': 'client_action_multi',\n 'value': (\n \"ir.actions.act_window,\"\n + str(vals['ref_ir_act_window'])),\n 'object': True,\n }, context)\n self.write(cr, uid, ids, {\n 'ref_ir_act_window': vals.get('ref_ir_act_window', False),\n 'ref_ir_value': vals.get('ref_ir_value', False),\n }, context)\n return True\n\n def unlink_action(self, cr, uid, ids, context=None):\n for template in self.browse(cr, uid, ids, context=context):\n try:\n if template.ref_ir_act_window:\n self.pool.get('ir.actions.act_window').unlink(\n cr, uid, template.ref_ir_act_window.id, context)\n if template.ref_ir_value:\n 
ir_values_obj = self.pool.get('ir.values')\n ir_values_obj.unlink(\n cr, uid, template.ref_ir_value.id, context)\n except:\n raise osv.except_osv(\n _(\"Warning\"),\n _(\"Deletion of the action record failed.\"))\n return True\n\n def unlink(self, cr, uid, ids, context=None):\n self.unlink_action(cr, uid, ids, context)\n return super(mass_object, self).unlink(cr, uid, ids, context)\n\n def copy(self, cr, uid, record_id, default=None, context=None):\n if default is None:\n default = {}\n default.update({'name': '', 'field_ids': []})\n return super(mass_object, self).copy(\n cr, uid, record_id, default, context)\n\nmass_object()\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n", "path": "mass_editing/mass_editing.py"}]} | 2,426 | 220 |
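To make the failure mode in the record above concrete: the old guard only excluded list operands, so an integer operand (as produced by the Fields filter) reached the string-slicing code. The sketch below is an illustrative Python 2 rendering of the bug and of the guard the diff adds, not code taken verbatim from the record.

```python
# Python 2 sketch of the failure the issue reports and the guard the diff adds.
domain = ('model_id', '=', 61)           # integer operand from the Fields filter

value = domain[2]
# Old guard -- `type(value) != list` lets the int through to string slicing:
#   value[1:-1]   ->  TypeError: 'int' object has no attribute '__getitem__'

# Patched guard: only parse textual operands such as "[61, 62]".
if len(domain) > 2 and isinstance(value, basestring):
    ids = map(int, value[1:-1].split(','))
else:
    ids = value                          # leave non-string operands untouched
```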
gh_patches_debug_12926 | rasdani/github-patches | git_diff | bokeh__bokeh-6804 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automatic configuration of Slider.format
Integer sliders should use integer formatting.
</issue>
<code>
[start of bokeh/models/widgets/sliders.py]
1 """ Various kinds of slider widgets.
2
3 """
4 from __future__ import absolute_import
5
6 from ...core.has_props import abstract
7 from ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override
8 from ...core.enums import SliderCallbackPolicy
9 from ..callbacks import Callback
10 from .widget import Widget
11
12 @abstract
13 class AbstractSlider(Widget):
14 """ """
15
16 title = String(default="", help="""
17 Slider's label.
18 """)
19
20 show_value = Bool(default=True, help="""
21 Whether or not show slider's value.
22 """)
23
24 format = String(help="""
25 """)
26
27 orientation = Enum("horizontal", "vertical", help="""
28 Orient the slider either horizontally (default) or vertically.
29 """)
30
31 direction = Enum("ltr", "rtl", help="""
32 """)
33
34 tooltips = Bool(default=True, help="""
35 """)
36
37 callback = Instance(Callback, help="""
38 A callback to run in the browser whenever the current Slider value changes.
39 """)
40
41 callback_throttle = Float(default=200, help="""
42 Number of millseconds to pause between callback calls as the slider is moved.
43 """)
44
45 callback_policy = Enum(SliderCallbackPolicy, default="throttle", help="""
46 When the callback is initiated. This parameter can take on only one of three options:
47
48 * "continuous": the callback will be executed immediately for each movement of the slider
49 * "throttle": the callback will be executed at most every ``callback_throttle`` milliseconds.
50 * "mouseup": the callback will be executed only once when the slider is released.
51
52 The "mouseup" policy is intended for scenarios in which the callback is expensive in time.
53 """)
54
55 bar_color = Color(default="#3fb8af", help="""
56 """)
57
58 class Slider(AbstractSlider):
59 """ Slider-based number selection widget. """
60
61 start = Float(help="""
62 The minimum allowable value.
63 """)
64
65 end = Float(help="""
66 The maximum allowable value.
67 """)
68
69 value = Float(help="""
70 Initial or selected value.
71 """)
72
73 step = Float(default=1, help="""
74 The step between consecutive values.
75 """)
76
77 format = Override(default="0,0.00")
78
79 class RangeSlider(AbstractSlider):
80 """ Range-slider based number range selection widget. """
81
82 value = Tuple(Float, Float, help="""
83 Initial or selected range.
84 """)
85
86 start = Float(help="""
87 The minimum allowable value.
88 """)
89
90 end = Float(help="""
91 The maximum allowable value.
92 """)
93
94 step = Float(default=1, help="""
95 The step between consecutive values.
96 """)
97
98 format = Override(default="0,0.00")
99
100 class DateSlider(AbstractSlider):
101 """ Slider-based date selection widget. """
102
103 value = Date(help="""
104 Initial or selected value.
105 """)
106
107 start = Date(help="""
108 The minimum allowable value.
109 """)
110
111 end = Date(help="""
112 The maximum allowable value.
113 """)
114
115 step = Int(default=1, help="""
116 The step between consecutive values.
117 """)
118
119 format = Override(default="%d %b %G")
120
121 class DateRangeSlider(AbstractSlider):
122 """ Slider-based date range selection widget. """
123
124 value = Tuple(Date, Date, help="""
125 Initial or selected range.
126 """)
127
128 start = Date(help="""
129 The minimum allowable value.
130 """)
131
132 end = Date(help="""
133 The maximum allowable value.
134 """)
135
136 step = Int(default=1, help="""
137 The step between consecutive values.
138 """)
139
140 format = Override(default="%d %b %G")
141
[end of bokeh/models/widgets/sliders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/widgets/sliders.py b/bokeh/models/widgets/sliders.py
--- a/bokeh/models/widgets/sliders.py
+++ b/bokeh/models/widgets/sliders.py
@@ -74,7 +74,7 @@
The step between consecutive values.
""")
- format = Override(default="0,0.00")
+ format = Override(default="0[.]00")
class RangeSlider(AbstractSlider):
""" Range-slider based number range selection widget. """
@@ -95,7 +95,7 @@
The step between consecutive values.
""")
- format = Override(default="0,0.00")
+ format = Override(default="0[.]00")
class DateSlider(AbstractSlider):
""" Slider-based date selection widget. """
| {"golden_diff": "diff --git a/bokeh/models/widgets/sliders.py b/bokeh/models/widgets/sliders.py\n--- a/bokeh/models/widgets/sliders.py\n+++ b/bokeh/models/widgets/sliders.py\n@@ -74,7 +74,7 @@\n The step between consecutive values.\n \"\"\")\n \n- format = Override(default=\"0,0.00\")\n+ format = Override(default=\"0[.]00\")\n \n class RangeSlider(AbstractSlider):\n \"\"\" Range-slider based number range selection widget. \"\"\"\n@@ -95,7 +95,7 @@\n The step between consecutive values.\n \"\"\")\n \n- format = Override(default=\"0,0.00\")\n+ format = Override(default=\"0[.]00\")\n \n class DateSlider(AbstractSlider):\n \"\"\" Slider-based date selection widget. \"\"\"\n", "issue": "Automatic configuration of Slider.format\nInteger sliders should use integer formatting.\r\n\n", "before_files": [{"content": "\"\"\" Various kinds of slider widgets.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.has_props import abstract\nfrom ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override\nfrom ...core.enums import SliderCallbackPolicy\nfrom ..callbacks import Callback\nfrom .widget import Widget\n\n@abstract\nclass AbstractSlider(Widget):\n \"\"\" \"\"\"\n\n title = String(default=\"\", help=\"\"\"\n Slider's label.\n \"\"\")\n\n show_value = Bool(default=True, help=\"\"\"\n Whether or not show slider's value.\n \"\"\")\n\n format = String(help=\"\"\"\n \"\"\")\n\n orientation = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Orient the slider either horizontally (default) or vertically.\n \"\"\")\n\n direction = Enum(\"ltr\", \"rtl\", help=\"\"\"\n \"\"\")\n\n tooltips = Bool(default=True, help=\"\"\"\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Slider value changes.\n \"\"\")\n\n callback_throttle = Float(default=200, help=\"\"\"\n Number of millseconds to pause between callback calls as the slider is moved.\n \"\"\")\n\n callback_policy = Enum(SliderCallbackPolicy, default=\"throttle\", help=\"\"\"\n When the callback is initiated. This parameter can take on only one of three options:\n\n * \"continuous\": the callback will be executed immediately for each movement of the slider\n * \"throttle\": the callback will be executed at most every ``callback_throttle`` milliseconds.\n * \"mouseup\": the callback will be executed only once when the slider is released.\n\n The \"mouseup\" policy is intended for scenarios in which the callback is expensive in time.\n \"\"\")\n\n bar_color = Color(default=\"#3fb8af\", help=\"\"\"\n \"\"\")\n\nclass Slider(AbstractSlider):\n \"\"\" Slider-based number selection widget. \"\"\"\n\n start = Float(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n value = Float(help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n step = Float(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"0,0.00\")\n\nclass RangeSlider(AbstractSlider):\n \"\"\" Range-slider based number range selection widget. 
\"\"\"\n\n value = Tuple(Float, Float, help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Float(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Float(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"0,0.00\")\n\nclass DateSlider(AbstractSlider):\n \"\"\" Slider-based date selection widget. \"\"\"\n\n value = Date(help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n start = Date(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Date(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Int(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"%d %b %G\")\n\nclass DateRangeSlider(AbstractSlider):\n \"\"\" Slider-based date range selection widget. \"\"\"\n\n value = Tuple(Date, Date, help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Date(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Date(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Int(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"%d %b %G\")\n", "path": "bokeh/models/widgets/sliders.py"}]} | 1,689 | 178 |
gh_patches_debug_11006 | rasdani/github-patches | git_diff | urllib3__urllib3-818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError in format_header_params
This issue was discussed here: https://github.com/kennethreitz/requests/issues/2639 and it seemed like the consensus was that this should be fixed in urllib3.
</issue>
<code>
[start of urllib3/fields.py]
1 from __future__ import absolute_import
2 import email.utils
3 import mimetypes
4
5 from .packages import six
6
7
8 def guess_content_type(filename, default='application/octet-stream'):
9 """
10 Guess the "Content-Type" of a file.
11
12 :param filename:
13 The filename to guess the "Content-Type" of using :mod:`mimetypes`.
14 :param default:
15 If no "Content-Type" can be guessed, default to `default`.
16 """
17 if filename:
18 return mimetypes.guess_type(filename)[0] or default
19 return default
20
21
22 def format_header_param(name, value):
23 """
24 Helper function to format and quote a single header parameter.
25
26 Particularly useful for header parameters which might contain
27 non-ASCII values, like file names. This follows RFC 2231, as
28 suggested by RFC 2388 Section 4.4.
29
30 :param name:
31 The name of the parameter, a string expected to be ASCII only.
32 :param value:
33 The value of the parameter, provided as a unicode string.
34 """
35 if not any(ch in value for ch in '"\\\r\n'):
36 result = '%s="%s"' % (name, value)
37 try:
38 result.encode('ascii')
39 except UnicodeEncodeError:
40 pass
41 else:
42 return result
43 if not six.PY3: # Python 2:
44 value = value.encode('utf-8')
45 value = email.utils.encode_rfc2231(value, 'utf-8')
46 value = '%s*=%s' % (name, value)
47 return value
48
49
50 class RequestField(object):
51 """
52 A data container for request body parameters.
53
54 :param name:
55 The name of this request field.
56 :param data:
57 The data/value body.
58 :param filename:
59 An optional filename of the request field.
60 :param headers:
61 An optional dict-like object of headers to initially use for the field.
62 """
63 def __init__(self, name, data, filename=None, headers=None):
64 self._name = name
65 self._filename = filename
66 self.data = data
67 self.headers = {}
68 if headers:
69 self.headers = dict(headers)
70
71 @classmethod
72 def from_tuples(cls, fieldname, value):
73 """
74 A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
75
76 Supports constructing :class:`~urllib3.fields.RequestField` from
77 parameter of key/value strings AND key/filetuple. A filetuple is a
78 (filename, data, MIME type) tuple where the MIME type is optional.
79 For example::
80
81 'foo': 'bar',
82 'fakefile': ('foofile.txt', 'contents of foofile'),
83 'realfile': ('barfile.txt', open('realfile').read()),
84 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
85 'nonamefile': 'contents of nonamefile field',
86
87 Field names and filenames must be unicode.
88 """
89 if isinstance(value, tuple):
90 if len(value) == 3:
91 filename, data, content_type = value
92 else:
93 filename, data = value
94 content_type = guess_content_type(filename)
95 else:
96 filename = None
97 content_type = None
98 data = value
99
100 request_param = cls(fieldname, data, filename=filename)
101 request_param.make_multipart(content_type=content_type)
102
103 return request_param
104
105 def _render_part(self, name, value):
106 """
107 Overridable helper function to format a single header parameter.
108
109 :param name:
110 The name of the parameter, a string expected to be ASCII only.
111 :param value:
112 The value of the parameter, provided as a unicode string.
113 """
114 return format_header_param(name, value)
115
116 def _render_parts(self, header_parts):
117 """
118 Helper function to format and quote a single header.
119
120 Useful for single headers that are composed of multiple items. E.g.,
121 'Content-Disposition' fields.
122
123 :param header_parts:
124 A sequence of (k, v) typles or a :class:`dict` of (k, v) to format
125 as `k1="v1"; k2="v2"; ...`.
126 """
127 parts = []
128 iterable = header_parts
129 if isinstance(header_parts, dict):
130 iterable = header_parts.items()
131
132 for name, value in iterable:
133 if value:
134 parts.append(self._render_part(name, value))
135
136 return '; '.join(parts)
137
138 def render_headers(self):
139 """
140 Renders the headers for this request field.
141 """
142 lines = []
143
144 sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
145 for sort_key in sort_keys:
146 if self.headers.get(sort_key, False):
147 lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
148
149 for header_name, header_value in self.headers.items():
150 if header_name not in sort_keys:
151 if header_value:
152 lines.append('%s: %s' % (header_name, header_value))
153
154 lines.append('\r\n')
155 return '\r\n'.join(lines)
156
157 def make_multipart(self, content_disposition=None, content_type=None,
158 content_location=None):
159 """
160 Makes this request field into a multipart request field.
161
162 This method overrides "Content-Disposition", "Content-Type" and
163 "Content-Location" headers to the request parameter.
164
165 :param content_type:
166 The 'Content-Type' of the request body.
167 :param content_location:
168 The 'Content-Location' of the request body.
169
170 """
171 self.headers['Content-Disposition'] = content_disposition or 'form-data'
172 self.headers['Content-Disposition'] += '; '.join([
173 '', self._render_parts(
174 (('name', self._name), ('filename', self._filename))
175 )
176 ])
177 self.headers['Content-Type'] = content_type
178 self.headers['Content-Location'] = content_location
179
[end of urllib3/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/urllib3/fields.py b/urllib3/fields.py
--- a/urllib3/fields.py
+++ b/urllib3/fields.py
@@ -36,11 +36,11 @@
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
- except UnicodeEncodeError:
+ except (UnicodeEncodeError, UnicodeDecodeError):
pass
else:
return result
- if not six.PY3: # Python 2:
+ if not six.PY3 and isinstance(value, six.text_type): # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
| {"golden_diff": "diff --git a/urllib3/fields.py b/urllib3/fields.py\n--- a/urllib3/fields.py\n+++ b/urllib3/fields.py\n@@ -36,11 +36,11 @@\n result = '%s=\"%s\"' % (name, value)\n try:\n result.encode('ascii')\n- except UnicodeEncodeError:\n+ except (UnicodeEncodeError, UnicodeDecodeError):\n pass\n else:\n return result\n- if not six.PY3: # Python 2:\n+ if not six.PY3 and isinstance(value, six.text_type): # Python 2:\n value = value.encode('utf-8')\n value = email.utils.encode_rfc2231(value, 'utf-8')\n value = '%s*=%s' % (name, value)\n", "issue": "UnicodeDecodeError in format_header_params\nThis issue was discussed here: https://github.com/kennethreitz/requests/issues/2639 and it seemed like the consensus was that this should be fixed in urllib3. \n\n", "before_files": [{"content": "from __future__ import absolute_import\nimport email.utils\nimport mimetypes\n\nfrom .packages import six\n\n\ndef guess_content_type(filename, default='application/octet-stream'):\n \"\"\"\n Guess the \"Content-Type\" of a file.\n\n :param filename:\n The filename to guess the \"Content-Type\" of using :mod:`mimetypes`.\n :param default:\n If no \"Content-Type\" can be guessed, default to `default`.\n \"\"\"\n if filename:\n return mimetypes.guess_type(filename)[0] or default\n return default\n\n\ndef format_header_param(name, value):\n \"\"\"\n Helper function to format and quote a single header parameter.\n\n Particularly useful for header parameters which might contain\n non-ASCII values, like file names. This follows RFC 2231, as\n suggested by RFC 2388 Section 4.4.\n\n :param name:\n The name of the parameter, a string expected to be ASCII only.\n :param value:\n The value of the parameter, provided as a unicode string.\n \"\"\"\n if not any(ch in value for ch in '\"\\\\\\r\\n'):\n result = '%s=\"%s\"' % (name, value)\n try:\n result.encode('ascii')\n except UnicodeEncodeError:\n pass\n else:\n return result\n if not six.PY3: # Python 2:\n value = value.encode('utf-8')\n value = email.utils.encode_rfc2231(value, 'utf-8')\n value = '%s*=%s' % (name, value)\n return value\n\n\nclass RequestField(object):\n \"\"\"\n A data container for request body parameters.\n\n :param name:\n The name of this request field.\n :param data:\n The data/value body.\n :param filename:\n An optional filename of the request field.\n :param headers:\n An optional dict-like object of headers to initially use for the field.\n \"\"\"\n def __init__(self, name, data, filename=None, headers=None):\n self._name = name\n self._filename = filename\n self.data = data\n self.headers = {}\n if headers:\n self.headers = dict(headers)\n\n @classmethod\n def from_tuples(cls, fieldname, value):\n \"\"\"\n A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.\n\n Supports constructing :class:`~urllib3.fields.RequestField` from\n parameter of key/value strings AND key/filetuple. 
A filetuple is a\n (filename, data, MIME type) tuple where the MIME type is optional.\n For example::\n\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n\n Field names and filenames must be unicode.\n \"\"\"\n if isinstance(value, tuple):\n if len(value) == 3:\n filename, data, content_type = value\n else:\n filename, data = value\n content_type = guess_content_type(filename)\n else:\n filename = None\n content_type = None\n data = value\n\n request_param = cls(fieldname, data, filename=filename)\n request_param.make_multipart(content_type=content_type)\n\n return request_param\n\n def _render_part(self, name, value):\n \"\"\"\n Overridable helper function to format a single header parameter.\n\n :param name:\n The name of the parameter, a string expected to be ASCII only.\n :param value:\n The value of the parameter, provided as a unicode string.\n \"\"\"\n return format_header_param(name, value)\n\n def _render_parts(self, header_parts):\n \"\"\"\n Helper function to format and quote a single header.\n\n Useful for single headers that are composed of multiple items. E.g.,\n 'Content-Disposition' fields.\n\n :param header_parts:\n A sequence of (k, v) typles or a :class:`dict` of (k, v) to format\n as `k1=\"v1\"; k2=\"v2\"; ...`.\n \"\"\"\n parts = []\n iterable = header_parts\n if isinstance(header_parts, dict):\n iterable = header_parts.items()\n\n for name, value in iterable:\n if value:\n parts.append(self._render_part(name, value))\n\n return '; '.join(parts)\n\n def render_headers(self):\n \"\"\"\n Renders the headers for this request field.\n \"\"\"\n lines = []\n\n sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']\n for sort_key in sort_keys:\n if self.headers.get(sort_key, False):\n lines.append('%s: %s' % (sort_key, self.headers[sort_key]))\n\n for header_name, header_value in self.headers.items():\n if header_name not in sort_keys:\n if header_value:\n lines.append('%s: %s' % (header_name, header_value))\n\n lines.append('\\r\\n')\n return '\\r\\n'.join(lines)\n\n def make_multipart(self, content_disposition=None, content_type=None,\n content_location=None):\n \"\"\"\n Makes this request field into a multipart request field.\n\n This method overrides \"Content-Disposition\", \"Content-Type\" and\n \"Content-Location\" headers to the request parameter.\n\n :param content_type:\n The 'Content-Type' of the request body.\n :param content_location:\n The 'Content-Location' of the request body.\n\n \"\"\"\n self.headers['Content-Disposition'] = content_disposition or 'form-data'\n self.headers['Content-Disposition'] += '; '.join([\n '', self._render_parts(\n (('name', self._name), ('filename', self._filename))\n )\n ])\n self.headers['Content-Type'] = content_type\n self.headers['Content-Location'] = content_location\n", "path": "urllib3/fields.py"}]} | 2,321 | 187 |
gh_patches_debug_22902 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-720 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
W&B: Allow for passing experiment into the WandbLogger (and logging semantics)
Currently, the WandbLogger will automatically create a new internal experiment (run) whenever you create a new WandbLogger.
# Issue
If I instantiate a wandb experiment outside of the logger, then I will have two experiments when I train my model since there is no way to set the internal experiment of the WandbLogger to my current external experiment.
# Potential Solution
Allow for passing an experiment into the WandbLogger:
```
class WandbLogger(LightningLoggerBase):
def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,
version=None, project=None, tags=None, experiment=None):
.
.
.
self._experiment = experiment
```
Then I can do this:
```
experiment = wandb.init(.......)
wandb_logger = WandbLogger(experiment=experiment)
```
I made this change locally; however, I wasn't sure if this was something you wanted to implement as well. It works for me.
# Another small note
In the `WandbLogger.log_metrics` function, I would change:
`self.experiment.history.add(metrics)` --> `self.experiment.log(metrics)`
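
A short usage sketch of how the two suggestions fit together (hedged: it assumes the constructor change above is applied, and the `wandb.init` arguments are placeholders):

```python
import wandb

from pytorch_lightning import Trainer
from pytorch_lightning.logging import WandbLogger

# Create (or resume) a run outside of the logger...
experiment = wandb.init(project="my-project")  # placeholder arguments

# ...and hand it to the logger instead of letting it call wandb.init() itself.
wandb_logger = WandbLogger(experiment=experiment)
trainer = Trainer(logger=wandb_logger)

# With the logging change, metrics would then go through
# experiment.log(metrics) rather than experiment.history.add(metrics).
```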
</issue>
<code>
[start of pytorch_lightning/logging/wandb.py]
1 import os
2
3 try:
4 import wandb
5 except ImportError:
6 raise ImportError('Missing wandb package.')
7
8 from .base import LightningLoggerBase, rank_zero_only
9
10
11 class WandbLogger(LightningLoggerBase):
12 """
13 Logger for W&B.
14
15 Args:
16 name (str): display name for the run.
17 save_dir (str): path where data is saved.
18 offline (bool): run offline (data can be streamed later to wandb servers).
19 id or version (str): sets the version, mainly used to resume a previous run.
20 anonymous (bool): enables or explicitly disables anonymous logging.
21 project (str): the name of the project to which this run will belong.
22 tags (list of str): tags associated with this run.
23
24 Example
25 --------
26 .. code-block:: python
27
28 from pytorch_lightning.logging import WandbLogger
29 from pytorch_lightning import Trainer
30
31 wandb_logger = WandbLogger()
32 trainer = Trainer(logger=wandb_logger)
33 """
34
35 def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,
36 version=None, project=None, tags=None):
37 super().__init__()
38 self._name = name
39 self._save_dir = save_dir
40 self._anonymous = "allow" if anonymous else None
41 self._id = version or id
42 self._tags = tags
43 self._project = project
44 self._experiment = None
45 self._offline = offline
46
47 def __getstate__(self):
48 state = self.__dict__.copy()
49 # cannot be pickled
50 state['_experiment'] = None
51 # args needed to reload correct experiment
52 state['_id'] = self.experiment.id
53 return state
54
55 @property
56 def experiment(self):
57 r"""
58
59 Actual wandb object. To use wandb features do the following.
60
61 Example::
62
63 self.logger.experiment.some_wandb_function()
64
65 """
66 if self._experiment is None:
67 if self._offline:
68 os.environ["WANDB_MODE"] = "dryrun"
69 self._experiment = wandb.init(
70 name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,
71 id=self._id, resume="allow", tags=self._tags)
72 return self._experiment
73
74 def watch(self, model, log="gradients", log_freq=100):
75 wandb.watch(model, log, log_freq)
76
77 @rank_zero_only
78 def log_hyperparams(self, params):
79 self.experiment.config.update(params)
80
81 @rank_zero_only
82 def log_metrics(self, metrics, step=None):
83 metrics["global_step"] = step
84 self.experiment.history.add(metrics)
85
86 def save(self):
87 pass
88
89 @rank_zero_only
90 def finalize(self, status='success'):
91 try:
92 exit_code = 0 if status == 'success' else 1
93 wandb.join(exit_code)
94 except TypeError:
95 wandb.join()
96
97 @property
98 def name(self):
99 return self.experiment.project_name()
100
101 @property
102 def version(self):
103 return self.experiment.id
104
[end of pytorch_lightning/logging/wandb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/logging/wandb.py b/pytorch_lightning/logging/wandb.py
--- a/pytorch_lightning/logging/wandb.py
+++ b/pytorch_lightning/logging/wandb.py
@@ -33,7 +33,7 @@
"""
def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,
- version=None, project=None, tags=None):
+ version=None, project=None, tags=None, experiment=None):
super().__init__()
self._name = name
self._save_dir = save_dir
@@ -41,7 +41,7 @@
self._id = version or id
self._tags = tags
self._project = project
- self._experiment = None
+ self._experiment = experiment
self._offline = offline
def __getstate__(self):
@@ -81,7 +81,7 @@
@rank_zero_only
def log_metrics(self, metrics, step=None):
metrics["global_step"] = step
- self.experiment.history.add(metrics)
+ self.experiment.log(metrics)
def save(self):
pass
| {"golden_diff": "diff --git a/pytorch_lightning/logging/wandb.py b/pytorch_lightning/logging/wandb.py\n--- a/pytorch_lightning/logging/wandb.py\n+++ b/pytorch_lightning/logging/wandb.py\n@@ -33,7 +33,7 @@\n \"\"\"\n \n def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,\n- version=None, project=None, tags=None):\n+ version=None, project=None, tags=None, experiment=None):\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n@@ -41,7 +41,7 @@\n self._id = version or id\n self._tags = tags\n self._project = project\n- self._experiment = None\n+ self._experiment = experiment\n self._offline = offline\n \n def __getstate__(self):\n@@ -81,7 +81,7 @@\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n metrics[\"global_step\"] = step\n- self.experiment.history.add(metrics)\n+ self.experiment.log(metrics)\n \n def save(self):\n pass\n", "issue": "W&B: Allow for passing experiment into the WandbLogger (and logging semantics)\nCurrently, the WandbLogger will automatically create a new internal experiment (run) whenever you create a new WandbLogger.\r\n\r\n# Issue \r\n\r\nIf I instantiate a wandb experiment outside of the logger, then I will have two experiments when I train my model since there is no way to set the internal experiment of the WandbLogger to my current external experiment.\r\n\r\n# Potential Solution\r\nAllow for passing an experiment into the WandbLogger:\r\n\r\n```\r\nclass WandbLogger(LightningLoggerBase):\r\n def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,\r\n version=None, project=None, tags=None, experiment=None):\r\n .\r\n .\r\n .\r\n self._experiment = experiment\r\n```\r\nThen I can do this:\r\n\r\n```\r\nexperiment = wandb.init(.......)\r\nwandb_logger = WandbLogger(experiment=experiment)\r\n```\r\nI made this change locally, however, I wasn't sure if this was something you also wanted to implement as well. It works for me.\r\n\r\n# Another small note\r\n\r\nIn the `WandbLogger.log_metrics` function, I would change:\r\n\r\n`self.experiment.history.add(metrics)` --> `self.experiment.log(metrics)`\n", "before_files": [{"content": "import os\n\ntry:\n import wandb\nexcept ImportError:\n raise ImportError('Missing wandb package.')\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\n\nclass WandbLogger(LightningLoggerBase):\n \"\"\"\n Logger for W&B.\n\n Args:\n name (str): display name for the run.\n save_dir (str): path where data is saved.\n offline (bool): run offline (data can be streamed later to wandb servers).\n id or version (str): sets the version, mainly used to resume a previous run.\n anonymous (bool): enables or explicitly disables anonymous logging.\n project (str): the name of the project to which this run will belong.\n tags (list of str): tags associated with this run.\n\n Example\n --------\n .. 
code-block:: python\n\n from pytorch_lightning.logging import WandbLogger\n from pytorch_lightning import Trainer\n\n wandb_logger = WandbLogger()\n trainer = Trainer(logger=wandb_logger)\n \"\"\"\n\n def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,\n version=None, project=None, tags=None):\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n self._anonymous = \"allow\" if anonymous else None\n self._id = version or id\n self._tags = tags\n self._project = project\n self._experiment = None\n self._offline = offline\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # cannot be pickled\n state['_experiment'] = None\n # args needed to reload correct experiment\n state['_id'] = self.experiment.id\n return state\n\n @property\n def experiment(self):\n r\"\"\"\n\n Actual wandb object. To use wandb features do the following.\n\n Example::\n\n self.logger.experiment.some_wandb_function()\n\n \"\"\"\n if self._experiment is None:\n if self._offline:\n os.environ[\"WANDB_MODE\"] = \"dryrun\"\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n id=self._id, resume=\"allow\", tags=self._tags)\n return self._experiment\n\n def watch(self, model, log=\"gradients\", log_freq=100):\n wandb.watch(model, log, log_freq)\n\n @rank_zero_only\n def log_hyperparams(self, params):\n self.experiment.config.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n metrics[\"global_step\"] = step\n self.experiment.history.add(metrics)\n\n def save(self):\n pass\n\n @rank_zero_only\n def finalize(self, status='success'):\n try:\n exit_code = 0 if status == 'success' else 1\n wandb.join(exit_code)\n except TypeError:\n wandb.join()\n\n @property\n def name(self):\n return self.experiment.project_name()\n\n @property\n def version(self):\n return self.experiment.id\n", "path": "pytorch_lightning/logging/wandb.py"}]} | 1,705 | 262 |
gh_patches_debug_8178 | rasdani/github-patches | git_diff | kartoza__prj.app-1077 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please include project name in certifying org admin
<img width="895" alt="image" src="https://user-images.githubusercontent.com/178003/67474466-6ab98000-f64c-11e9-8a69-95e12e8bd404.png">
We have two Kartoza entries there - one for InaSAFE and one for QGIS - and I nearly deleted the first, not realising it was for InaSAFE.
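
A minimal sketch of the requested change, assuming `CertifyingOrganisation` has a `project` field; everything except the `list_display` line is unchanged from the admin module shown below:

```python
class CertifyingOrganisationAdmin(SimpleHistoryAdmin):
    """Certifying organisation admin model."""

    filter_horizontal = ('organisation_owners',)
    search_fields = ['name']
    # Showing the project makes it clear which project each entry belongs to.
    list_display = ('name', 'project', 'country', 'approved', 'rejected')
    list_filter = ('country', 'approved', 'rejected', 'status')
```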
</issue>
<code>
[start of django_project/certification/admin.py]
1 # coding=UTF-8
2 """Model admin class definitions."""
3
4 from django.contrib.gis import admin
5 from simple_history.admin import SimpleHistoryAdmin
6 from certification.models.certificate import Certificate
7 from certification.models.course import Course
8 from certification.models.training_center import TrainingCenter
9 from certification.models.course_convener import CourseConvener
10 from certification.models.course_type import CourseType
11 from certification.models.attendee import Attendee
12 from certification.models.course_attendee import CourseAttendee
13 from certification.models.certifying_organisation import CertifyingOrganisation
14 from certification.models.organisation_certificate import \
15 CertifyingOrganisationCertificate
16 from certification.models.status import Status
17
18
19 class CertificateAdmin(admin.ModelAdmin):
20 """Certificate admin model."""
21
22 list_display = ('__unicode__', 'course')
23 search_fields = ('certificateID', 'course__name',)
24
25 def queryset(self, request):
26 """Ensure we use the correct manager.
27
28 :param request: HttpRequest object
29 """
30 query_set = self.model.objects
31 ordering = self.get_ordering(request)
32 if ordering:
33 query_set = query_set.order_by(*ordering)
34 return query_set
35
36
37 class AttendeeAdmin(admin.ModelAdmin):
38 """Attendee admin model."""
39 list_display = ('firstname', 'surname', 'email', 'certifying_organisation')
40 search_fields = ['firstname', 'surname']
41
42 def queryset(self, request):
43 """Ensure we use the correct manager.
44
45 :param request: HttpRequest object
46 """
47 query_set = self.model.objects
48 ordering = self.get_ordering(request)
49 if ordering:
50 query_set = query_set.order_by(*ordering)
51 return query_set
52
53
54 class CourseAttendeeAdmin(admin.ModelAdmin):
55     """Course attendee admin model."""
56 list_display = ('course', 'attendee', 'author')
57
58 def queryset(self, request):
59 """Ensure we use the correct manager.
60
61 :param request: HttpRequest object
62 """
63 query_set = self.model.objects
64 ordering = self.get_ordering(request)
65 if ordering:
66 query_set = query_set.order_by(*ordering)
67 return query_set
68
69
70 class CourseAdmin(admin.ModelAdmin):
71 """Course admin model."""
72
73 def queryset(self, request):
74 """Ensure we use the correct manager.
75
76 :param request: HttpRequest object
77 """
78 query_set = self.model.objects
79 ordering = self.get_ordering(request)
80 if ordering:
81 query_set = query_set.order_by(*ordering)
82 return query_set
83
84
85 class CourseTypeAdmin(admin.ModelAdmin):
86 """Course type admin model."""
87
88 def queryset(self, request):
89 """Ensure we use the correct manager.
90
91 :param request: HttpRequest object
92 """
93 query_set = self.model.objects
94 ordering = self.get_ordering(request)
95 if ordering:
96 query_set = query_set.order_by(*ordering)
97 return query_set
98
99
100 class TrainingCenterAdmin(admin.GeoModelAdmin):
101 """Training center admin model."""
102
103 def queryset(self, request):
104 """Ensure we use the correct manager.
105
106 :param request: HttpRequest object
107 """
108 query_set = self.model.objects
109 ordering = self.get_ordering(request)
110 if ordering:
111 query_set = query_set.order_by(*ordering)
112 return query_set
113
114
115 class CourseConvenerAdmin(admin.ModelAdmin):
116 """Course convener admin model."""
117
118 def queryset(self, request):
119 """Ensure we use the correct manager.
120
121 :param request: HttpRequest object
122 """
123 query_set = self.model.objects
124 ordering = self.get_ordering(request)
125 if ordering:
126 query_set = query_set.order_by(*ordering)
127 return query_set
128
129
130 class CertifyingOrganisationCertificateAdminInline(admin.TabularInline):
131 model = CertifyingOrganisationCertificate
132 extra = 0
133
134
135 class CertifyingOrganisationCertificateAdmin(SimpleHistoryAdmin):
136 history_list_display = ['issued', 'valid']
137
138
139 class CertifyingOrganisationAdmin(SimpleHistoryAdmin):
140 """Certifying organisation admin model."""
141
142 filter_horizontal = ('organisation_owners',)
143 search_fields = ['name']
144 list_display = ('name', 'country', 'approved', 'rejected')
145 list_filter = ('country', 'approved', 'rejected', 'status')
146 inlines = (CertifyingOrganisationCertificateAdminInline, )
147 history_list_display = ['status', 'remarks']
148
149 def queryset(self, request):
150 """Ensure we use the correct manager.
151
152 :param request: HttpRequest object
153 """
154 query_set = self.model.objects
155 ordering = self.get_ordering(request)
156 if ordering:
157 query_set = query_set.order_by(*ordering)
158 return query_set
159
160
161 class StatusAdmin(admin.ModelAdmin):
162 list_display = ('name', 'project', 'order')
163
164
165 admin.site.register(Certificate, CertificateAdmin)
166 admin.site.register(Attendee, AttendeeAdmin)
167 admin.site.register(Course, CourseAdmin)
168 admin.site.register(CourseType, CourseTypeAdmin)
169 admin.site.register(TrainingCenter, TrainingCenterAdmin)
170 admin.site.register(CourseConvener, CourseConvenerAdmin)
171 admin.site.register(CertifyingOrganisation, CertifyingOrganisationAdmin)
172 admin.site.register(CourseAttendee, CourseAttendeeAdmin)
173 admin.site.register(
174 CertifyingOrganisationCertificate, CertifyingOrganisationCertificateAdmin)
175 admin.site.register(Status, StatusAdmin)
176
[end of django_project/certification/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/certification/admin.py b/django_project/certification/admin.py
--- a/django_project/certification/admin.py
+++ b/django_project/certification/admin.py
@@ -141,7 +141,7 @@
filter_horizontal = ('organisation_owners',)
search_fields = ['name']
- list_display = ('name', 'country', 'approved', 'rejected')
+ list_display = ('name', 'project', 'country', 'approved', 'rejected')
list_filter = ('country', 'approved', 'rejected', 'status')
inlines = (CertifyingOrganisationCertificateAdminInline, )
history_list_display = ['status', 'remarks']
| {"golden_diff": "diff --git a/django_project/certification/admin.py b/django_project/certification/admin.py\n--- a/django_project/certification/admin.py\n+++ b/django_project/certification/admin.py\n@@ -141,7 +141,7 @@\n \n filter_horizontal = ('organisation_owners',)\n search_fields = ['name']\n- list_display = ('name', 'country', 'approved', 'rejected')\n+ list_display = ('name', 'project', 'country', 'approved', 'rejected')\n list_filter = ('country', 'approved', 'rejected', 'status')\n inlines = (CertifyingOrganisationCertificateAdminInline, )\n history_list_display = ['status', 'remarks']\n", "issue": "Please include project name in certifying org admin\n<img width=\"895\" alt=\"image\" src=\"https://user-images.githubusercontent.com/178003/67474466-6ab98000-f64c-11e9-8a69-95e12e8bd404.png\">\r\n\r\n\r\nWe have two Kartoza entries there - one for InaSAFE and one for QGIS - I nearly deleted the first not realising it was for InaSAFE\n", "before_files": [{"content": "# coding=UTF-8\n\"\"\"Model admin class definitions.\"\"\"\n\nfrom django.contrib.gis import admin\nfrom simple_history.admin import SimpleHistoryAdmin\nfrom certification.models.certificate import Certificate\nfrom certification.models.course import Course\nfrom certification.models.training_center import TrainingCenter\nfrom certification.models.course_convener import CourseConvener\nfrom certification.models.course_type import CourseType\nfrom certification.models.attendee import Attendee\nfrom certification.models.course_attendee import CourseAttendee\nfrom certification.models.certifying_organisation import CertifyingOrganisation\nfrom certification.models.organisation_certificate import \\\n CertifyingOrganisationCertificate\nfrom certification.models.status import Status\n\n\nclass CertificateAdmin(admin.ModelAdmin):\n \"\"\"Certificate admin model.\"\"\"\n\n list_display = ('__unicode__', 'course')\n search_fields = ('certificateID', 'course__name',)\n\n def queryset(self, request):\n \"\"\"Ensure we use the correct manager.\n\n :param request: HttpRequest object\n \"\"\"\n query_set = self.model.objects\n ordering = self.get_ordering(request)\n if ordering:\n query_set = query_set.order_by(*ordering)\n return query_set\n\n\nclass AttendeeAdmin(admin.ModelAdmin):\n \"\"\"Attendee admin model.\"\"\"\n list_display = ('firstname', 'surname', 'email', 'certifying_organisation')\n search_fields = ['firstname', 'surname']\n\n def queryset(self, request):\n \"\"\"Ensure we use the correct manager.\n\n :param request: HttpRequest object\n \"\"\"\n query_set = self.model.objects\n ordering = self.get_ordering(request)\n if ordering:\n query_set = query_set.order_by(*ordering)\n return query_set\n\n\nclass CourseAttendeeAdmin(admin.ModelAdmin):\n \"\"\"Certificate admin model.\"\"\"\n list_display = ('course', 'attendee', 'author')\n\n def queryset(self, request):\n \"\"\"Ensure we use the correct manager.\n\n :param request: HttpRequest object\n \"\"\"\n query_set = self.model.objects\n ordering = self.get_ordering(request)\n if ordering:\n query_set = query_set.order_by(*ordering)\n return query_set\n\n\nclass CourseAdmin(admin.ModelAdmin):\n \"\"\"Course admin model.\"\"\"\n\n def queryset(self, request):\n \"\"\"Ensure we use the correct manager.\n\n :param request: HttpRequest object\n \"\"\"\n query_set = self.model.objects\n ordering = self.get_ordering(request)\n if ordering:\n query_set = query_set.order_by(*ordering)\n return query_set\n\n\nclass CourseTypeAdmin(admin.ModelAdmin):\n \"\"\"Course type admin 
model.\"\"\"\n\n def queryset(self, request):\n \"\"\"Ensure we use the correct manager.\n\n :param request: HttpRequest object\n \"\"\"\n query_set = self.model.objects\n ordering = self.get_ordering(request)\n if ordering:\n query_set = query_set.order_by(*ordering)\n return query_set\n\n\nclass TrainingCenterAdmin(admin.GeoModelAdmin):\n \"\"\"Training center admin model.\"\"\"\n\n def queryset(self, request):\n \"\"\"Ensure we use the correct manager.\n\n :param request: HttpRequest object\n \"\"\"\n query_set = self.model.objects\n ordering = self.get_ordering(request)\n if ordering:\n query_set = query_set.order_by(*ordering)\n return query_set\n\n\nclass CourseConvenerAdmin(admin.ModelAdmin):\n \"\"\"Course convener admin model.\"\"\"\n\n def queryset(self, request):\n \"\"\"Ensure we use the correct manager.\n\n :param request: HttpRequest object\n \"\"\"\n query_set = self.model.objects\n ordering = self.get_ordering(request)\n if ordering:\n query_set = query_set.order_by(*ordering)\n return query_set\n\n\nclass CertifyingOrganisationCertificateAdminInline(admin.TabularInline):\n model = CertifyingOrganisationCertificate\n extra = 0\n\n\nclass CertifyingOrganisationCertificateAdmin(SimpleHistoryAdmin):\n history_list_display = ['issued', 'valid']\n\n\nclass CertifyingOrganisationAdmin(SimpleHistoryAdmin):\n \"\"\"Certifying organisation admin model.\"\"\"\n\n filter_horizontal = ('organisation_owners',)\n search_fields = ['name']\n list_display = ('name', 'country', 'approved', 'rejected')\n list_filter = ('country', 'approved', 'rejected', 'status')\n inlines = (CertifyingOrganisationCertificateAdminInline, )\n history_list_display = ['status', 'remarks']\n\n def queryset(self, request):\n \"\"\"Ensure we use the correct manager.\n\n :param request: HttpRequest object\n \"\"\"\n query_set = self.model.objects\n ordering = self.get_ordering(request)\n if ordering:\n query_set = query_set.order_by(*ordering)\n return query_set\n\n\nclass StatusAdmin(admin.ModelAdmin):\n list_display = ('name', 'project', 'order')\n\n\nadmin.site.register(Certificate, CertificateAdmin)\nadmin.site.register(Attendee, AttendeeAdmin)\nadmin.site.register(Course, CourseAdmin)\nadmin.site.register(CourseType, CourseTypeAdmin)\nadmin.site.register(TrainingCenter, TrainingCenterAdmin)\nadmin.site.register(CourseConvener, CourseConvenerAdmin)\nadmin.site.register(CertifyingOrganisation, CertifyingOrganisationAdmin)\nadmin.site.register(CourseAttendee, CourseAttendeeAdmin)\nadmin.site.register(\n CertifyingOrganisationCertificate, CertifyingOrganisationCertificateAdmin)\nadmin.site.register(Status, StatusAdmin)\n", "path": "django_project/certification/admin.py"}]} | 2,199 | 153 |
gh_patches_debug_26619 | rasdani/github-patches | git_diff | benoitc__gunicorn-826 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upcoming Tornado change breaks gunicorn.workers.gtornado
The next release of Tornado (it will be 4.0 when released, although the current master branch hasn't yet been updated to change all mentions of 3.3 to 4.0) makes some major changes to the HTTP internals and breaks gunicorn's monkey-patching of HTTPConnection.finish. Fortunately, there is now a cleaner way to do the tracking that gunicorn wants to do here, since the interface between HTTPServer and Application is more formally defined by the HTTPServerConnectionDelegate and HTTPMessageDelegate interfaces. You should be able to wrap the Application (which implements/subclasses HTTPServerConnectionDelegate) and the HTTPMessageDelegate that its start_request method returns.
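
A rough sketch of what the non-monkey-patching approach could look like, assuming Tornado >= 4.0 exposes the `HTTPServer.on_close(server_conn)` hook; the helper and attribute names here are illustrative, not gunicorn's actual code:

```python
import tornado.httpserver


def make_counting_server_class(worker):
    """Return an HTTPServer subclass that reports finished requests to *worker*."""

    class CountingHTTPServer(tornado.httpserver.HTTPServer):
        def on_close(self, server_conn):
            # Tornado >= 4.0 calls this when a server connection is closed;
            # this is roughly where the old HTTPConnection.finish patch hooked in.
            worker.handle_request()
            super(CountingHTTPServer, self).on_close(server_conn)

    return CountingHTTPServer


# e.g. inside TornadoWorker.run():
#     server_class = make_counting_server_class(self)
#     server = server_class(app, io_loop=self.ioloop)
```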
</issue>
<code>
[start of gunicorn/workers/gtornado.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import os
7 import sys
8
9 try:
10 import tornado.web
11 except ImportError:
12 raise RuntimeError("You need tornado installed to use this worker.")
13 import tornado.httpserver
14 from tornado.ioloop import IOLoop, PeriodicCallback
15 from tornado.wsgi import WSGIContainer
16 from gunicorn.workers.base import Worker
17 from gunicorn import __version__ as gversion
18
19
20 class TornadoWorker(Worker):
21
22 @classmethod
23 def setup(cls):
24 web = sys.modules.pop("tornado.web")
25 old_clear = web.RequestHandler.clear
26
27 def clear(self):
28 old_clear(self)
29 self._headers["Server"] += " (Gunicorn/%s)" % gversion
30 web.RequestHandler.clear = clear
31 sys.modules["tornado.web"] = web
32
33 def handle_exit(self, sig, frame):
34 if self.alive:
35 super(TornadoWorker, self).handle_exit(sig, frame)
36 self.stop()
37
38 def handle_request(self):
39 self.nr += 1
40 if self.alive and self.nr >= self.max_requests:
41 self.alive = False
42 self.log.info("Autorestarting worker after current request.")
43 self.stop()
44
45 def watchdog(self):
46 if self.alive:
47 self.notify()
48
49 if self.ppid != os.getppid():
50 self.log.info("Parent changed, shutting down: %s", self)
51 self.stop()
52
53 def run(self):
54 self.ioloop = IOLoop.instance()
55 self.alive = True
56 PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start()
57
58         # Assume the app is a WSGI callable if it's not an
59 # instance of tornado.web.Application or is an
60 # instance of tornado.wsgi.WSGIApplication
61 app = self.wsgi
62 if not isinstance(app, tornado.web.Application) or \
63 isinstance(app, tornado.wsgi.WSGIApplication):
64 app = WSGIContainer(app)
65
66 # Monkey-patching HTTPConnection.finish to count the
67 # number of requests being handled by Tornado. This
68 # will help gunicorn shutdown the worker if max_requests
69 # is exceeded.
70 httpserver = sys.modules["tornado.httpserver"]
71 old_connection_finish = httpserver.HTTPConnection.finish
72
73 def finish(other):
74 self.handle_request()
75 old_connection_finish(other)
76 httpserver.HTTPConnection.finish = finish
77 sys.modules["tornado.httpserver"] = httpserver
78
79 if self.cfg.is_ssl:
80 server = tornado.httpserver.HTTPServer(app, io_loop=self.ioloop,
81 ssl_options=self.cfg.ssl_options)
82 else:
83 server = tornado.httpserver.HTTPServer(app,
84 io_loop=self.ioloop)
85
86 self.server = server
87
88 for s in self.sockets:
89 s.setblocking(0)
90 if hasattr(server, "add_socket"): # tornado > 2.0
91 server.add_socket(s)
92 elif hasattr(server, "_sockets"): # tornado 2.0
93 server._sockets[s.fileno()] = s
94
95 server.no_keep_alive = self.cfg.keepalive <= 0
96 server.start(num_processes=1)
97
98 self.ioloop.start()
99
100 def stop(self):
101 if hasattr(self, 'server'):
102 try:
103 self.server.stop()
104 except Exception:
105 pass
106 PeriodicCallback(self.stop_ioloop, 1000, io_loop=self.ioloop).start()
107
108 def stop_ioloop(self):
109 if not self.ioloop._callbacks and len(self.ioloop._timeouts) <= 1:
110 self.ioloop.stop()
111
[end of gunicorn/workers/gtornado.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gunicorn/workers/gtornado.py b/gunicorn/workers/gtornado.py
--- a/gunicorn/workers/gtornado.py
+++ b/gunicorn/workers/gtornado.py
@@ -68,20 +68,31 @@
# will help gunicorn shutdown the worker if max_requests
# is exceeded.
httpserver = sys.modules["tornado.httpserver"]
- old_connection_finish = httpserver.HTTPConnection.finish
+ if hasattr(httpserver, 'HTTPConnection'):
+ old_connection_finish = httpserver.HTTPConnection.finish
- def finish(other):
- self.handle_request()
- old_connection_finish(other)
- httpserver.HTTPConnection.finish = finish
- sys.modules["tornado.httpserver"] = httpserver
+ def finish(other):
+ self.handle_request()
+ old_connection_finish(other)
+ httpserver.HTTPConnection.finish = finish
+ sys.modules["tornado.httpserver"] = httpserver
+
+ server_class = tornado.httpserver.HTTPServer
+ else:
+
+ class _HTTPServer(tornado.httpserver.HTTPServer):
+
+ def on_close(instance, server_conn):
+ self.handle_request()
+ super(_HTTPServer, instance).on_close(server_conn)
+
+ server_class = _HTTPServer
if self.cfg.is_ssl:
- server = tornado.httpserver.HTTPServer(app, io_loop=self.ioloop,
+ server = server_class(app, io_loop=self.ioloop,
ssl_options=self.cfg.ssl_options)
else:
- server = tornado.httpserver.HTTPServer(app,
- io_loop=self.ioloop)
+ server = server_class(app, io_loop=self.ioloop)
self.server = server
| {"golden_diff": "diff --git a/gunicorn/workers/gtornado.py b/gunicorn/workers/gtornado.py\n--- a/gunicorn/workers/gtornado.py\n+++ b/gunicorn/workers/gtornado.py\n@@ -68,20 +68,31 @@\n # will help gunicorn shutdown the worker if max_requests\n # is exceeded.\n httpserver = sys.modules[\"tornado.httpserver\"]\n- old_connection_finish = httpserver.HTTPConnection.finish\n+ if hasattr(httpserver, 'HTTPConnection'):\n+ old_connection_finish = httpserver.HTTPConnection.finish\n \n- def finish(other):\n- self.handle_request()\n- old_connection_finish(other)\n- httpserver.HTTPConnection.finish = finish\n- sys.modules[\"tornado.httpserver\"] = httpserver\n+ def finish(other):\n+ self.handle_request()\n+ old_connection_finish(other)\n+ httpserver.HTTPConnection.finish = finish\n+ sys.modules[\"tornado.httpserver\"] = httpserver\n+\n+ server_class = tornado.httpserver.HTTPServer\n+ else:\n+\n+ class _HTTPServer(tornado.httpserver.HTTPServer):\n+\n+ def on_close(instance, server_conn):\n+ self.handle_request()\n+ super(_HTTPServer, instance).on_close(server_conn)\n+\n+ server_class = _HTTPServer\n \n if self.cfg.is_ssl:\n- server = tornado.httpserver.HTTPServer(app, io_loop=self.ioloop,\n+ server = server_class(app, io_loop=self.ioloop,\n ssl_options=self.cfg.ssl_options)\n else:\n- server = tornado.httpserver.HTTPServer(app,\n- io_loop=self.ioloop)\n+ server = server_class(app, io_loop=self.ioloop)\n \n self.server = server\n", "issue": "Upcoming Tornado change breaks gunicorn.workers.gtornado\nThe next release of Tornado (will be 4.0 when released, although the current master branch hasn't been updated to change all mentions of 3.3 to 4.0) makes some major changes to the HTTP internals and breaks gunicorn's monkey-patching of HTTPConnection.finish. Fortunately, there is now a cleaner way to do the tracking that gunicorn wants to do here, since the interface between HTTPServer and Application is more formally defined by the HTTPServerConnectionDelegate and HTTPMessageDelegate interfaces, so you should be able to wrap the Application (which implements/subclasses HTTPServerConnectionDelegate) and the HTTPMessageDelegate its start_request method returns. 
\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport sys\n\ntry:\n import tornado.web\nexcept ImportError:\n raise RuntimeError(\"You need tornado installed to use this worker.\")\nimport tornado.httpserver\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado.wsgi import WSGIContainer\nfrom gunicorn.workers.base import Worker\nfrom gunicorn import __version__ as gversion\n\n\nclass TornadoWorker(Worker):\n\n @classmethod\n def setup(cls):\n web = sys.modules.pop(\"tornado.web\")\n old_clear = web.RequestHandler.clear\n\n def clear(self):\n old_clear(self)\n self._headers[\"Server\"] += \" (Gunicorn/%s)\" % gversion\n web.RequestHandler.clear = clear\n sys.modules[\"tornado.web\"] = web\n\n def handle_exit(self, sig, frame):\n if self.alive:\n super(TornadoWorker, self).handle_exit(sig, frame)\n self.stop()\n\n def handle_request(self):\n self.nr += 1\n if self.alive and self.nr >= self.max_requests:\n self.alive = False\n self.log.info(\"Autorestarting worker after current request.\")\n self.stop()\n\n def watchdog(self):\n if self.alive:\n self.notify()\n\n if self.ppid != os.getppid():\n self.log.info(\"Parent changed, shutting down: %s\", self)\n self.stop()\n\n def run(self):\n self.ioloop = IOLoop.instance()\n self.alive = True\n PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start()\n\n # Assume the app is a WSGI callable if its not an\n # instance of tornado.web.Application or is an\n # instance of tornado.wsgi.WSGIApplication\n app = self.wsgi\n if not isinstance(app, tornado.web.Application) or \\\n isinstance(app, tornado.wsgi.WSGIApplication):\n app = WSGIContainer(app)\n\n # Monkey-patching HTTPConnection.finish to count the\n # number of requests being handled by Tornado. This\n # will help gunicorn shutdown the worker if max_requests\n # is exceeded.\n httpserver = sys.modules[\"tornado.httpserver\"]\n old_connection_finish = httpserver.HTTPConnection.finish\n\n def finish(other):\n self.handle_request()\n old_connection_finish(other)\n httpserver.HTTPConnection.finish = finish\n sys.modules[\"tornado.httpserver\"] = httpserver\n\n if self.cfg.is_ssl:\n server = tornado.httpserver.HTTPServer(app, io_loop=self.ioloop,\n ssl_options=self.cfg.ssl_options)\n else:\n server = tornado.httpserver.HTTPServer(app,\n io_loop=self.ioloop)\n\n self.server = server\n\n for s in self.sockets:\n s.setblocking(0)\n if hasattr(server, \"add_socket\"): # tornado > 2.0\n server.add_socket(s)\n elif hasattr(server, \"_sockets\"): # tornado 2.0\n server._sockets[s.fileno()] = s\n\n server.no_keep_alive = self.cfg.keepalive <= 0\n server.start(num_processes=1)\n\n self.ioloop.start()\n\n def stop(self):\n if hasattr(self, 'server'):\n try:\n self.server.stop()\n except Exception:\n pass\n PeriodicCallback(self.stop_ioloop, 1000, io_loop=self.ioloop).start()\n\n def stop_ioloop(self):\n if not self.ioloop._callbacks and len(self.ioloop._timeouts) <= 1:\n self.ioloop.stop()\n", "path": "gunicorn/workers/gtornado.py"}]} | 1,734 | 373 |
gh_patches_debug_4286 | rasdani/github-patches | git_diff | mozilla__pontoon-2379 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Redirected to a non-existing page while trying to translate to en-US
## Description
When I try to translate an application to `en-US`, I am redirected (`301`) to a non-existing page:

→ `/en-US/my-project/` is redirected to `/my-project/`
## Steps to reproduce
* Add the `en-US` locale to a project
* Browse the project's languages
* Click on English (en-US)
* → 404
## More...
This seems to be caused by some legacy code in `pontoon/urls.py:26`:
```python
urlpatterns = [
# Legacy: Locale redirect for compatibility with i18n ready URL scheme
path("en-US<path:url>", RedirectView.as_view(url="%(url)s", permanent=True)),
```
Removing this line fixes the problem and does not seem to cause additional issues (I am still testing on a production instance).
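
For reference, a sketch of how the top of `urlpatterns` would read with the legacy redirect dropped (only that one entry is removed; everything else in `pontoon/urls.py` stays as-is):

```python
urlpatterns = [
    # Redirect legacy Aurora projects
    path(
        "projects/firefox-aurora/<path:url>",
        RedirectView.as_view(url="/projects/firefox/%(url)s", permanent=True),
    ),
    # ... the remaining patterns stay exactly as they are ...
]
```
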
* This issue may be related to → #2192
* Another issue to look at about `en-US` locale → #2260
</issue>
<code>
[start of pontoon/urls.py]
1 from django.urls import include, path, register_converter
2 from django.urls.converters import StringConverter
3 from django.contrib import admin
4 from django.contrib.auth import logout
5 from django.views.generic import RedirectView, TemplateView
6
7 from pontoon.teams.views import team
8
9
10 class LocaleConverter(StringConverter):
11 regex = r"[A-Za-z0-9\-\@\.]+"
12
13
14 register_converter(LocaleConverter, "locale")
15
16 pontoon_js_view = TemplateView.as_view(
17 template_name="js/pontoon.js", content_type="text/javascript"
18 )
19
20 permission_denied_view = TemplateView.as_view(template_name="403.html")
21 page_not_found_view = TemplateView.as_view(template_name="404.html")
22 server_error_view = TemplateView.as_view(template_name="500.html")
23
24 urlpatterns = [
25 # Legacy: Locale redirect for compatibility with i18n ready URL scheme
26 path("en-US<path:url>", RedirectView.as_view(url="%(url)s", permanent=True)),
27 # Redirect legacy Aurora projects
28 path(
29 "projects/firefox-aurora/<path:url>",
30 RedirectView.as_view(url="/projects/firefox/%(url)s", permanent=True),
31 ),
32 path(
33 "projects/firefox-for-android-aurora/<path:url>",
34 RedirectView.as_view(
35 url="/projects/firefox-for-android/%(url)s", permanent=True
36 ),
37 ),
38 path(
39 "projects/thunderbird-aurora/<path:url>",
40 RedirectView.as_view(url="/projects/thunderbird/%(url)s", permanent=True),
41 ),
42 path(
43 "projects/lightning-aurora/<path:url>",
44 RedirectView.as_view(url="/projects/lightning/%(url)s", permanent=True),
45 ),
46 path(
47 "projects/seamonkey-aurora/<path:url>",
48 RedirectView.as_view(url="/projects/seamonkey/%(url)s", permanent=True),
49 ),
50 path(
51 "<locale:locale>/firefox-aurora/<path:url>",
52 RedirectView.as_view(url="/%(locale)s/firefox/%(url)s", permanent=True),
53 ),
54 path(
55 "<locale:locale>/firefox-for-android-aurora/<path:url>",
56 RedirectView.as_view(
57 url="/%(locale)s/firefox-for-android/%(url)s", permanent=True
58 ),
59 ),
60 path(
61 "<locale:locale>/thunderbird-aurora/<path:url>",
62 RedirectView.as_view(url="/%(locale)s/thunderbird/%(url)s", permanent=True),
63 ),
64 path(
65 "<locale:locale>/lightning-aurora/<path:url>",
66 RedirectView.as_view(url="/%(locale)s/lightning/%(url)s", permanent=True),
67 ),
68 path(
69 "<locale:locale>/seamonkey-aurora/<path:url>",
70 RedirectView.as_view(url="/%(locale)s/seamonkey/%(url)s", permanent=True),
71 ),
72 # Accounts
73 path("accounts/", include("pontoon.allauth_urls")),
74 # Admin
75 path("admin/", include("pontoon.administration.urls")),
76 # Django admin: Disable the login form
77 path("a/login/", permission_denied_view),
78 # Django admin
79 path("a/", admin.site.urls),
80 # Logout
81 path("signout/", logout, {"next_page": "/"}, name="signout"),
82 # Error pages
83 path("403/", permission_denied_view),
84 path("404/", page_not_found_view),
85 path("500/", server_error_view),
86 # Robots.txt
87 path(
88 "robots.txt",
89 TemplateView.as_view(template_name="robots.txt", content_type="text/plain"),
90 ),
91 # contribute.json
92 path(
93 "contribute.json",
94 TemplateView.as_view(
95 template_name="contribute.json", content_type="text/plain"
96 ),
97 ),
98 # Favicon
99 path(
100 "favicon.ico",
101 RedirectView.as_view(url="/static/img/favicon.ico", permanent=True),
102 ),
103 # Include script
104 path("pontoon.js", pontoon_js_view),
105 path("static/js/pontoon.js", pontoon_js_view),
106 # Include URL configurations from installed apps
107 path("terminology/", include("pontoon.terminology.urls")),
108 path("translations/", include("pontoon.translations.urls")),
109 path("", include("pontoon.teams.urls")),
110 path("", include("pontoon.tour.urls")),
111 path("", include("pontoon.tags.urls")),
112 path("", include("pontoon.sync.urls")),
113 path("", include("pontoon.projects.urls")),
114 path("", include("pontoon.machinery.urls")),
115 path("", include("pontoon.contributors.urls")),
116 path("", include("pontoon.localizations.urls")),
117 path("", include("pontoon.base.urls")),
118 path("", include("pontoon.translate.urls")),
119 path("", include("pontoon.batch.urls")),
120 path("", include("pontoon.api.urls")),
121 path("", include("pontoon.homepage.urls")),
122 path("", include("pontoon.in_context.urls")),
123 path("", include("pontoon.uxactionlog.urls")),
124 # Team page: Must be at the end
125 path("<locale:locale>/", team, name="pontoon.teams.team"),
126 ]
127
[end of pontoon/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/urls.py b/pontoon/urls.py
--- a/pontoon/urls.py
+++ b/pontoon/urls.py
@@ -22,8 +22,6 @@
server_error_view = TemplateView.as_view(template_name="500.html")
urlpatterns = [
- # Legacy: Locale redirect for compatibility with i18n ready URL scheme
- path("en-US<path:url>", RedirectView.as_view(url="%(url)s", permanent=True)),
# Redirect legacy Aurora projects
path(
"projects/firefox-aurora/<path:url>",
| {"golden_diff": "diff --git a/pontoon/urls.py b/pontoon/urls.py\n--- a/pontoon/urls.py\n+++ b/pontoon/urls.py\n@@ -22,8 +22,6 @@\n server_error_view = TemplateView.as_view(template_name=\"500.html\")\n \n urlpatterns = [\n- # Legacy: Locale redirect for compatibility with i18n ready URL scheme\n- path(\"en-US<path:url>\", RedirectView.as_view(url=\"%(url)s\", permanent=True)),\n # Redirect legacy Aurora projects\n path(\n \"projects/firefox-aurora/<path:url>\",\n", "issue": "Redirected to a non-existing page while tring to translate to en-US\n## Description\r\n\r\nWhen I try to translate an application to `en-US`, I am redirected (`301`) to a non-existing page:\r\n\r\n\r\n\r\n\u2192 `/en-US/my-project/` is redirected to `/my-project/`\r\n\r\n## Steps to reproduce\r\n\r\n* Add the `en-US` locale to a project\r\n* Browse project's language\r\n* Click on English (en-US)\r\n* \u2192 404\r\n\r\n## More...\r\n\r\nThis seems to be caused by some legacy code in `pontoon/urls.py:26`:\r\n\r\n```python\r\nurlpatterns = [\r\n # Legacy: Locale redirect for compatibility with i18n ready URL scheme\r\n path(\"en-US<path:url>\", RedirectView.as_view(url=\"%(url)s\", permanent=True)),\r\n```\r\n\r\nRemoving this line fixes the problem and does not seem to cause additional issues (I am still testing on a production instance).\r\n\r\n* This issue may be related to \u2192 #2192 \r\n* Another issue to look at about `en-US` locale \u2192 #2260\r\n\n", "before_files": [{"content": "from django.urls import include, path, register_converter\nfrom django.urls.converters import StringConverter\nfrom django.contrib import admin\nfrom django.contrib.auth import logout\nfrom django.views.generic import RedirectView, TemplateView\n\nfrom pontoon.teams.views import team\n\n\nclass LocaleConverter(StringConverter):\n regex = r\"[A-Za-z0-9\\-\\@\\.]+\"\n\n\nregister_converter(LocaleConverter, \"locale\")\n\npontoon_js_view = TemplateView.as_view(\n template_name=\"js/pontoon.js\", content_type=\"text/javascript\"\n)\n\npermission_denied_view = TemplateView.as_view(template_name=\"403.html\")\npage_not_found_view = TemplateView.as_view(template_name=\"404.html\")\nserver_error_view = TemplateView.as_view(template_name=\"500.html\")\n\nurlpatterns = [\n # Legacy: Locale redirect for compatibility with i18n ready URL scheme\n path(\"en-US<path:url>\", RedirectView.as_view(url=\"%(url)s\", permanent=True)),\n # Redirect legacy Aurora projects\n path(\n \"projects/firefox-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/firefox/%(url)s\", permanent=True),\n ),\n path(\n \"projects/firefox-for-android-aurora/<path:url>\",\n RedirectView.as_view(\n url=\"/projects/firefox-for-android/%(url)s\", permanent=True\n ),\n ),\n path(\n \"projects/thunderbird-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/thunderbird/%(url)s\", permanent=True),\n ),\n path(\n \"projects/lightning-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/lightning/%(url)s\", permanent=True),\n ),\n path(\n \"projects/seamonkey-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/seamonkey/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/firefox-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/firefox/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/firefox-for-android-aurora/<path:url>\",\n RedirectView.as_view(\n url=\"/%(locale)s/firefox-for-android/%(url)s\", permanent=True\n ),\n ),\n path(\n \"<locale:locale>/thunderbird-aurora/<path:url>\",\n 
RedirectView.as_view(url=\"/%(locale)s/thunderbird/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/lightning-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/lightning/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/seamonkey-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/seamonkey/%(url)s\", permanent=True),\n ),\n # Accounts\n path(\"accounts/\", include(\"pontoon.allauth_urls\")),\n # Admin\n path(\"admin/\", include(\"pontoon.administration.urls\")),\n # Django admin: Disable the login form\n path(\"a/login/\", permission_denied_view),\n # Django admin\n path(\"a/\", admin.site.urls),\n # Logout\n path(\"signout/\", logout, {\"next_page\": \"/\"}, name=\"signout\"),\n # Error pages\n path(\"403/\", permission_denied_view),\n path(\"404/\", page_not_found_view),\n path(\"500/\", server_error_view),\n # Robots.txt\n path(\n \"robots.txt\",\n TemplateView.as_view(template_name=\"robots.txt\", content_type=\"text/plain\"),\n ),\n # contribute.json\n path(\n \"contribute.json\",\n TemplateView.as_view(\n template_name=\"contribute.json\", content_type=\"text/plain\"\n ),\n ),\n # Favicon\n path(\n \"favicon.ico\",\n RedirectView.as_view(url=\"/static/img/favicon.ico\", permanent=True),\n ),\n # Include script\n path(\"pontoon.js\", pontoon_js_view),\n path(\"static/js/pontoon.js\", pontoon_js_view),\n # Include URL configurations from installed apps\n path(\"terminology/\", include(\"pontoon.terminology.urls\")),\n path(\"translations/\", include(\"pontoon.translations.urls\")),\n path(\"\", include(\"pontoon.teams.urls\")),\n path(\"\", include(\"pontoon.tour.urls\")),\n path(\"\", include(\"pontoon.tags.urls\")),\n path(\"\", include(\"pontoon.sync.urls\")),\n path(\"\", include(\"pontoon.projects.urls\")),\n path(\"\", include(\"pontoon.machinery.urls\")),\n path(\"\", include(\"pontoon.contributors.urls\")),\n path(\"\", include(\"pontoon.localizations.urls\")),\n path(\"\", include(\"pontoon.base.urls\")),\n path(\"\", include(\"pontoon.translate.urls\")),\n path(\"\", include(\"pontoon.batch.urls\")),\n path(\"\", include(\"pontoon.api.urls\")),\n path(\"\", include(\"pontoon.homepage.urls\")),\n path(\"\", include(\"pontoon.in_context.urls\")),\n path(\"\", include(\"pontoon.uxactionlog.urls\")),\n # Team page: Must be at the end\n path(\"<locale:locale>/\", team, name=\"pontoon.teams.team\"),\n]\n", "path": "pontoon/urls.py"}]} | 2,227 | 130 |
gh_patches_debug_61381 | rasdani/github-patches | git_diff | tensorflow__addons-1213 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Proposal: Upload the nightlies at each commit on the master branch
We already build the wheels anyway; it's just missing the push to PyPI. Yes/No?
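For context, the nightly suffix is currently stamped per day, so pushing on every commit would collide with PyPI's unique-version rule after the first upload of a day. A sketch of the finer-grained stamp this needs (it matches the change in the diff further down; the CI upload step itself is not shown here):

```python
from datetime import datetime

# Per-build timestamp instead of a per-day one, so every commit's wheel
# gets a distinct tfa-nightly version on PyPI.
version["__version__"] += datetime.now().strftime("%Y%m%d%H%M%S")
```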
</issue>
<code>
[start of setup.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons.
16
17 TensorFlow Addons is a repository of contributions that conform to well-
18 established API patterns, but implement new functionality not available
19 in core TensorFlow. TensorFlow natively supports a large number of
20 operators, layers, metrics, losses, and optimizers. However, in a fast
21 moving field like ML, there are many interesting new developments that
22 cannot be integrated into core TensorFlow (because their broad
23 applicability is not yet clear, or it is mostly used by a smaller subset
24 of the community).
25 """
26
27 import os
28 import sys
29
30 from datetime import datetime
31 from setuptools import find_packages
32 from setuptools import setup
33 from setuptools.dist import Distribution
34 from setuptools import Extension
35
36 DOCLINES = __doc__.split("\n")
37
38 TFA_NIGHTLY = "tfa-nightly"
39 TFA_RELEASE = "tensorflow-addons"
40
41 if "--nightly" in sys.argv:
42 project_name = TFA_NIGHTLY
43 nightly_idx = sys.argv.index("--nightly")
44 sys.argv.pop(nightly_idx)
45 else:
46 project_name = TFA_RELEASE
47
48 # Version
49 version = {}
50 base_dir = os.path.dirname(os.path.abspath(__file__))
51 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
52 exec(fp.read(), version)
53
54 if project_name == TFA_NIGHTLY:
55 version["__version__"] += datetime.strftime(datetime.today(), "%Y%m%d")
56
57 with open("requirements.txt") as f:
58 required_pkgs = f.read().splitlines()
59
60 # Manylinux2010 requires a patch for platlib
61 if (
62 sys.platform.startswith("linux")
63 and os.environ.get("TF_ADDONS_NO_BUILD", "0") == "0"
64 ):
65 ext_modules = [Extension("_foo", ["stub.cc"])]
66 else:
67 ext_modules = []
68
69
70 class BinaryDistribution(Distribution):
71 """This class is needed in order to create OS specific wheels."""
72
73 def has_ext_modules(self):
74 return True
75
76
77 setup(
78 name=project_name,
79 version=version["__version__"],
80 description=DOCLINES[0],
81 long_description="\n".join(DOCLINES[2:]),
82 author="Google Inc.",
83 author_email="[email protected]",
84 packages=find_packages(),
85 ext_modules=ext_modules,
86 install_requires=required_pkgs,
87 include_package_data=True,
88 zip_safe=False,
89 distclass=BinaryDistribution,
90 classifiers=[
91 "Development Status :: 4 - Beta",
92 "Intended Audience :: Developers",
93 "Intended Audience :: Education",
94 "Intended Audience :: Science/Research",
95 "License :: OSI Approved :: Apache Software License",
96 "Programming Language :: Python :: 3.5",
97 "Programming Language :: Python :: 3.6",
98 "Programming Language :: Python :: 3.7",
99 "Topic :: Scientific/Engineering :: Mathematics",
100 "Topic :: Software Development :: Libraries :: Python Modules",
101 "Topic :: Software Development :: Libraries",
102 ],
103 license="Apache 2.0",
104 keywords="tensorflow addons machine learning",
105 )
106
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,7 +52,7 @@
exec(fp.read(), version)
if project_name == TFA_NIGHTLY:
- version["__version__"] += datetime.strftime(datetime.today(), "%Y%m%d")
+ version["__version__"] += datetime.now().strftime("%Y%m%d%H%M%S")
with open("requirements.txt") as f:
required_pkgs = f.read().splitlines()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,7 +52,7 @@\n exec(fp.read(), version)\n \n if project_name == TFA_NIGHTLY:\n- version[\"__version__\"] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n+ version[\"__version__\"] += datetime.now().strftime(\"%Y%m%d%H%M%S\")\n \n with open(\"requirements.txt\") as f:\n required_pkgs = f.read().splitlines()\n", "issue": "Proposal: Upload the nightlies at each commit on the master branch\nWe already build the wheels already anyway. It's just missing the push to pypi. Yes/No?\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nimport os\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split(\"\\n\")\n\nTFA_NIGHTLY = \"tfa-nightly\"\nTFA_RELEASE = \"tensorflow-addons\"\n\nif \"--nightly\" in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index(\"--nightly\")\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n exec(fp.read(), version)\n\nif project_name == TFA_NIGHTLY:\n version[\"__version__\"] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\nwith open(\"requirements.txt\") as f:\n required_pkgs = f.read().splitlines()\n\n# Manylinux2010 requires a patch for platlib\nif (\n sys.platform.startswith(\"linux\")\n and os.environ.get(\"TF_ADDONS_NO_BUILD\", \"0\") == \"0\"\n):\n ext_modules = [Extension(\"_foo\", [\"stub.cc\"])]\nelse:\n ext_modules = []\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version[\"__version__\"],\n description=DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author=\"Google Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n ext_modules=ext_modules,\n install_requires=required_pkgs,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n \"Development Status :: 4 - 
Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Libraries\",\n ],\n license=\"Apache 2.0\",\n keywords=\"tensorflow addons machine learning\",\n)\n", "path": "setup.py"}]} | 1,566 | 111 |
gh_patches_debug_35842 | rasdani/github-patches | git_diff | dmlc__dgl-5543 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Sparse] Support SparseMatrix element-wise multiplication with different sparsities.
## 🔨Work Item
**IMPORTANT:**
* This template is only for the dev team to track project progress. For feature requests or bug reports, please use the corresponding issue templates.
* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.
Project tracker: https://github.com/orgs/dmlc/projects/2
## Description
<del>
Breakdown into two work items.
1. Support on CPU (#5519).
3. Support on GPU.
</del>
Implemented by concatenation and unique on two COO matrices.
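
A rough sketch of that trick in plain PyTorch (an illustration only, not the library's kernel; it assumes both inputs are coalesced, i.e. neither has duplicate coordinates, and a PyTorch recent enough to have `scatter_reduce`):

```python
import torch

def coo_elementwise_mul(indices_a, values_a, indices_b, values_b):
    # Concatenate the coordinates of both matrices; a coordinate that occurs
    # twice after concatenation is nonzero in both inputs and survives A * B.
    all_indices = torch.cat([indices_a, indices_b], dim=1)
    uniq, inverse, counts = torch.unique(
        all_indices, dim=1, return_inverse=True, return_counts=True)

    # Multiply together the values that land on the same unique coordinate.
    vals = torch.cat([values_a, values_b])
    prod = torch.ones(uniq.shape[1], dtype=vals.dtype, device=vals.device)
    prod = prod.scatter_reduce(0, inverse, vals, reduce="prod")

    keep = counts == 2
    return uniq[:, keep], prod[keep]
```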
## Depending work items or issues
<!-- what must be done before this -->
</issue>
<code>
[start of python/dgl/sparse/elementwise_op_sp.py]
1 """DGL elementwise operators for sparse matrix module."""
2 from typing import Union
3
4 import torch
5
6 from .sparse_matrix import diag, SparseMatrix, val_like
7 from .utils import is_scalar, Scalar
8
9
10 def spsp_add(A, B):
11 """Invoke C++ sparse library for addition"""
12 return SparseMatrix(
13 torch.ops.dgl_sparse.spsp_add(A.c_sparse_matrix, B.c_sparse_matrix)
14 )
15
16
17 def sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
18 """Elementwise addition
19
20 Parameters
21 ----------
22 A : SparseMatrix
23 Sparse matrix
24 B : SparseMatrix
25 Sparse matrix
26
27 Returns
28 -------
29 SparseMatrix
30 Sparse matrix
31
32 Examples
33 --------
34
35 >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
36 >>> val = torch.tensor([10, 20, 30])
37 >>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
38 >>> A + A
39 SparseMatrix(indices=tensor([[0, 1, 2],
40 [3, 0, 2]]),
41 values=tensor([40, 20, 60]),
42 shape=(3, 4), nnz=3)
43 """
44 # Python falls back to B.__radd__ then TypeError when NotImplemented is
45 # returned.
46 return spsp_add(A, B) if isinstance(B, SparseMatrix) else NotImplemented
47
48
49 def sp_sub(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
50 """Elementwise subtraction
51
52 Parameters
53 ----------
54 A : SparseMatrix
55 Sparse matrix
56 B : SparseMatrix
57 Sparse matrix
58
59 Returns
60 -------
61 SparseMatrix
62 Sparse matrix
63
64 Examples
65 --------
66
67 >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
68 >>> val = torch.tensor([10, 20, 30])
69 >>> val2 = torch.tensor([5, 10, 15])
70 >>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
71 >>> B = dglsp.spmatrix(indices, val2, shape=(3, 4))
72 >>> A - B
73 SparseMatrix(indices=tensor([[0, 1, 2],
74 [3, 0, 2]]),
75 values=tensor([10, 5, 15]),
76 shape=(3, 4), nnz=3)
77 """
78 # Python falls back to B.__rsub__ then TypeError when NotImplemented is
79 # returned.
80 return spsp_add(A, -B) if isinstance(B, SparseMatrix) else NotImplemented
81
82
83 def sp_mul(A: SparseMatrix, B: Union[SparseMatrix, Scalar]) -> SparseMatrix:
84 """Elementwise multiplication
85
86 If :attr:`B` is a sparse matrix, both :attr:`A` and :attr:`B` must be
87 diagonal matrices.
88
89 Parameters
90 ----------
91 A : SparseMatrix
92 First operand
93 B : SparseMatrix or Scalar
94 Second operand
95
96 Returns
97 -------
98 SparseMatrix
99 Result of A * B
100
101 Examples
102 --------
103
104 >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
105 >>> val = torch.tensor([1, 2, 3])
106 >>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
107
108 >>> A * 2
109 SparseMatrix(indices=tensor([[1, 0, 2],
110 [0, 3, 2]]),
111 values=tensor([2, 4, 6]),
112 shape=(3, 4), nnz=3)
113
114 >>> 2 * A
115 SparseMatrix(indices=tensor([[1, 0, 2],
116 [0, 3, 2]]),
117 values=tensor([2, 4, 6]),
118 shape=(3, 4), nnz=3)
119 """
120 if is_scalar(B):
121 return val_like(A, A.val * B)
122 if A.is_diag() and B.is_diag():
123 assert A.shape == B.shape, (
124 f"The shape of diagonal matrix A {A.shape} and B {B.shape} must"
125 f"match for elementwise multiplication."
126 )
127 return diag(A.val * B.val, A.shape)
128 # Python falls back to B.__rmul__(A) then TypeError when NotImplemented is
129 # returned.
130 # So this also handles the case of scalar * SparseMatrix since we set
131 # SparseMatrix.__rmul__ to be the same as SparseMatrix.__mul__.
132 return NotImplemented
133
134
135 def sp_div(A: SparseMatrix, B: Union[SparseMatrix, Scalar]) -> SparseMatrix:
136 """Elementwise division
137
138 If :attr:`B` is a sparse matrix, both :attr:`A` and :attr:`B` must be
139 diagonal matrices.
140
141 Parameters
142 ----------
143 A : SparseMatrix
144 First operand
145 B : SparseMatrix or Scalar
146 Second operand
147
148 Returns
149 -------
150 SparseMatrix
151 Result of A / B
152
153 Examples
154 --------
155 >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
156 >>> val = torch.tensor([1, 2, 3])
157 >>> A = dglsp.spmatrix(indices, val, shape=(3, 4))
158 >>> A / 2
159 SparseMatrix(indices=tensor([[1, 0, 2],
160 [0, 3, 2]]),
161 values=tensor([0.5000, 1.0000, 1.5000]),
162 shape=(3, 4), nnz=3)
163 """
164 if is_scalar(B):
165 return val_like(A, A.val / B)
166 if A.is_diag() and B.is_diag():
167 assert A.shape == B.shape, (
168 f"The shape of diagonal matrix A {A.shape} and B {B.shape} must"
169 f"match for elementwise division."
170 )
171 return diag(A.val / B.val, A.shape)
172 # Python falls back to B.__rtruediv__(A) then TypeError when NotImplemented
173 # is returned.
174 return NotImplemented
175
176
177 def sp_power(A: SparseMatrix, scalar: Scalar) -> SparseMatrix:
178 """Take the power of each nonzero element and return a sparse matrix with
179 the result.
180
181 Parameters
182 ----------
183 A : SparseMatrix
184 Sparse matrix
185 scalar : float or int
186 Exponent
187
188 Returns
189 -------
190 SparseMatrix
191 Sparse matrix
192
193 Examples
194 --------
195 >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])
196 >>> val = torch.tensor([10, 20, 30])
197 >>> A = dglsp.spmatrix(indices, val)
198 >>> A ** 2
199 SparseMatrix(indices=tensor([[1, 0, 2],
200 [0, 3, 2]]),
201 values=tensor([100, 400, 900]),
202 shape=(3, 4), nnz=3)
203 """
204 # Python falls back to scalar.__rpow__ then TypeError when NotImplemented
205 # is returned.
206 return val_like(A, A.val**scalar) if is_scalar(scalar) else NotImplemented
207
208
209 SparseMatrix.__add__ = sp_add
210 SparseMatrix.__sub__ = sp_sub
211 SparseMatrix.__mul__ = sp_mul
212 SparseMatrix.__rmul__ = sp_mul
213 SparseMatrix.__truediv__ = sp_div
214 SparseMatrix.__pow__ = sp_power
215
[end of python/dgl/sparse/elementwise_op_sp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/dgl/sparse/elementwise_op_sp.py b/python/dgl/sparse/elementwise_op_sp.py
--- a/python/dgl/sparse/elementwise_op_sp.py
+++ b/python/dgl/sparse/elementwise_op_sp.py
@@ -14,6 +14,13 @@
)
+def spsp_mul(A, B):
+ """Invoke C++ sparse library for multiplication"""
+ return SparseMatrix(
+ torch.ops.dgl_sparse.spsp_mul(A.c_sparse_matrix, B.c_sparse_matrix)
+ )
+
+
def sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:
"""Elementwise addition
@@ -83,8 +90,8 @@
def sp_mul(A: SparseMatrix, B: Union[SparseMatrix, Scalar]) -> SparseMatrix:
"""Elementwise multiplication
- If :attr:`B` is a sparse matrix, both :attr:`A` and :attr:`B` must be
- diagonal matrices.
+ Note that if both :attr:`A` and :attr:`B` are sparse matrices, both of them
+ need to be diagonal or on CPU.
Parameters
----------
@@ -108,28 +115,27 @@
>>> A * 2
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
- values=tensor([2, 4, 6]),
- shape=(3, 4), nnz=3)
+ values=tensor([2, 4, 6]),
+ shape=(3, 4), nnz=3)
>>> 2 * A
SparseMatrix(indices=tensor([[1, 0, 2],
[0, 3, 2]]),
- values=tensor([2, 4, 6]),
- shape=(3, 4), nnz=3)
+ values=tensor([2, 4, 6]),
+ shape=(3, 4), nnz=3)
+
+ >>> indices2 = torch.tensor([[2, 0, 1], [0, 3, 2]])
+ >>> val2 = torch.tensor([3, 2, 1])
+ >>> B = dglsp.spmatrix(indices2, val2, shape=(3, 4))
+ >>> A * B
+ SparseMatrix(indices=tensor([[0],
+ [3]]),
+ values=tensor([4]),
+ shape=(3, 4), nnz=1)
"""
if is_scalar(B):
return val_like(A, A.val * B)
- if A.is_diag() and B.is_diag():
- assert A.shape == B.shape, (
- f"The shape of diagonal matrix A {A.shape} and B {B.shape} must"
- f"match for elementwise multiplication."
- )
- return diag(A.val * B.val, A.shape)
- # Python falls back to B.__rmul__(A) then TypeError when NotImplemented is
- # returned.
- # So this also handles the case of scalar * SparseMatrix since we set
- # SparseMatrix.__rmul__ to be the same as SparseMatrix.__mul__.
- return NotImplemented
+ return spsp_mul(A, B)
def sp_div(A: SparseMatrix, B: Union[SparseMatrix, Scalar]) -> SparseMatrix:
| {"golden_diff": "diff --git a/python/dgl/sparse/elementwise_op_sp.py b/python/dgl/sparse/elementwise_op_sp.py\n--- a/python/dgl/sparse/elementwise_op_sp.py\n+++ b/python/dgl/sparse/elementwise_op_sp.py\n@@ -14,6 +14,13 @@\n )\n \n \n+def spsp_mul(A, B):\n+ \"\"\"Invoke C++ sparse library for multiplication\"\"\"\n+ return SparseMatrix(\n+ torch.ops.dgl_sparse.spsp_mul(A.c_sparse_matrix, B.c_sparse_matrix)\n+ )\n+\n+\n def sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:\n \"\"\"Elementwise addition\n \n@@ -83,8 +90,8 @@\n def sp_mul(A: SparseMatrix, B: Union[SparseMatrix, Scalar]) -> SparseMatrix:\n \"\"\"Elementwise multiplication\n \n- If :attr:`B` is a sparse matrix, both :attr:`A` and :attr:`B` must be\n- diagonal matrices.\n+ Note that if both :attr:`A` and :attr:`B` are sparse matrices, both of them\n+ need to be diagonal or on CPU.\n \n Parameters\n ----------\n@@ -108,28 +115,27 @@\n >>> A * 2\n SparseMatrix(indices=tensor([[1, 0, 2],\n [0, 3, 2]]),\n- values=tensor([2, 4, 6]),\n- shape=(3, 4), nnz=3)\n+ values=tensor([2, 4, 6]),\n+ shape=(3, 4), nnz=3)\n \n >>> 2 * A\n SparseMatrix(indices=tensor([[1, 0, 2],\n [0, 3, 2]]),\n- values=tensor([2, 4, 6]),\n- shape=(3, 4), nnz=3)\n+ values=tensor([2, 4, 6]),\n+ shape=(3, 4), nnz=3)\n+\n+ >>> indices2 = torch.tensor([[2, 0, 1], [0, 3, 2]])\n+ >>> val2 = torch.tensor([3, 2, 1])\n+ >>> B = dglsp.spmatrix(indices2, val2, shape=(3, 4))\n+ >>> A * B\n+ SparseMatrix(indices=tensor([[0],\n+ [3]]),\n+ values=tensor([4]),\n+ shape=(3, 4), nnz=1)\n \"\"\"\n if is_scalar(B):\n return val_like(A, A.val * B)\n- if A.is_diag() and B.is_diag():\n- assert A.shape == B.shape, (\n- f\"The shape of diagonal matrix A {A.shape} and B {B.shape} must\"\n- f\"match for elementwise multiplication.\"\n- )\n- return diag(A.val * B.val, A.shape)\n- # Python falls back to B.__rmul__(A) then TypeError when NotImplemented is\n- # returned.\n- # So this also handles the case of scalar * SparseMatrix since we set\n- # SparseMatrix.__rmul__ to be the same as SparseMatrix.__mul__.\n- return NotImplemented\n+ return spsp_mul(A, B)\n \n \n def sp_div(A: SparseMatrix, B: Union[SparseMatrix, Scalar]) -> SparseMatrix:\n", "issue": "[Sparse] Support SparseMatrix element-wise multiplication with different sparsities.\n## \ud83d\udd28Work Item\r\n\r\n**IMPORTANT:**\r\n* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.\r\n* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.\r\n\r\nProject tracker: https://github.com/orgs/dmlc/projects/2\r\n\r\n## Description\r\n\r\n<del>\r\nBreakdown into two work items.\r\n\r\n1. Support on CPU (#5519).\r\n\r\n3. 
Support on GPU.\r\n\r\n</del>\r\n\r\nImplemented by concatenation and unique on two COO matrices.\r\n## Depending work items or issues\r\n\r\n<!-- what must be done before this -->\r\n\n", "before_files": [{"content": "\"\"\"DGL elementwise operators for sparse matrix module.\"\"\"\nfrom typing import Union\n\nimport torch\n\nfrom .sparse_matrix import diag, SparseMatrix, val_like\nfrom .utils import is_scalar, Scalar\n\n\ndef spsp_add(A, B):\n \"\"\"Invoke C++ sparse library for addition\"\"\"\n return SparseMatrix(\n torch.ops.dgl_sparse.spsp_add(A.c_sparse_matrix, B.c_sparse_matrix)\n )\n\n\ndef sp_add(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:\n \"\"\"Elementwise addition\n\n Parameters\n ----------\n A : SparseMatrix\n Sparse matrix\n B : SparseMatrix\n Sparse matrix\n\n Returns\n -------\n SparseMatrix\n Sparse matrix\n\n Examples\n --------\n\n >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])\n >>> val = torch.tensor([10, 20, 30])\n >>> A = dglsp.spmatrix(indices, val, shape=(3, 4))\n >>> A + A\n SparseMatrix(indices=tensor([[0, 1, 2],\n [3, 0, 2]]),\n values=tensor([40, 20, 60]),\n shape=(3, 4), nnz=3)\n \"\"\"\n # Python falls back to B.__radd__ then TypeError when NotImplemented is\n # returned.\n return spsp_add(A, B) if isinstance(B, SparseMatrix) else NotImplemented\n\n\ndef sp_sub(A: SparseMatrix, B: SparseMatrix) -> SparseMatrix:\n \"\"\"Elementwise subtraction\n\n Parameters\n ----------\n A : SparseMatrix\n Sparse matrix\n B : SparseMatrix\n Sparse matrix\n\n Returns\n -------\n SparseMatrix\n Sparse matrix\n\n Examples\n --------\n\n >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])\n >>> val = torch.tensor([10, 20, 30])\n >>> val2 = torch.tensor([5, 10, 15])\n >>> A = dglsp.spmatrix(indices, val, shape=(3, 4))\n >>> B = dglsp.spmatrix(indices, val2, shape=(3, 4))\n >>> A - B\n SparseMatrix(indices=tensor([[0, 1, 2],\n [3, 0, 2]]),\n values=tensor([10, 5, 15]),\n shape=(3, 4), nnz=3)\n \"\"\"\n # Python falls back to B.__rsub__ then TypeError when NotImplemented is\n # returned.\n return spsp_add(A, -B) if isinstance(B, SparseMatrix) else NotImplemented\n\n\ndef sp_mul(A: SparseMatrix, B: Union[SparseMatrix, Scalar]) -> SparseMatrix:\n \"\"\"Elementwise multiplication\n\n If :attr:`B` is a sparse matrix, both :attr:`A` and :attr:`B` must be\n diagonal matrices.\n\n Parameters\n ----------\n A : SparseMatrix\n First operand\n B : SparseMatrix or Scalar\n Second operand\n\n Returns\n -------\n SparseMatrix\n Result of A * B\n\n Examples\n --------\n\n >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])\n >>> val = torch.tensor([1, 2, 3])\n >>> A = dglsp.spmatrix(indices, val, shape=(3, 4))\n\n >>> A * 2\n SparseMatrix(indices=tensor([[1, 0, 2],\n [0, 3, 2]]),\n values=tensor([2, 4, 6]),\n shape=(3, 4), nnz=3)\n\n >>> 2 * A\n SparseMatrix(indices=tensor([[1, 0, 2],\n [0, 3, 2]]),\n values=tensor([2, 4, 6]),\n shape=(3, 4), nnz=3)\n \"\"\"\n if is_scalar(B):\n return val_like(A, A.val * B)\n if A.is_diag() and B.is_diag():\n assert A.shape == B.shape, (\n f\"The shape of diagonal matrix A {A.shape} and B {B.shape} must\"\n f\"match for elementwise multiplication.\"\n )\n return diag(A.val * B.val, A.shape)\n # Python falls back to B.__rmul__(A) then TypeError when NotImplemented is\n # returned.\n # So this also handles the case of scalar * SparseMatrix since we set\n # SparseMatrix.__rmul__ to be the same as SparseMatrix.__mul__.\n return NotImplemented\n\n\ndef sp_div(A: SparseMatrix, B: Union[SparseMatrix, Scalar]) -> SparseMatrix:\n \"\"\"Elementwise 
division\n\n If :attr:`B` is a sparse matrix, both :attr:`A` and :attr:`B` must be\n diagonal matrices.\n\n Parameters\n ----------\n A : SparseMatrix\n First operand\n B : SparseMatrix or Scalar\n Second operand\n\n Returns\n -------\n SparseMatrix\n Result of A / B\n\n Examples\n --------\n >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])\n >>> val = torch.tensor([1, 2, 3])\n >>> A = dglsp.spmatrix(indices, val, shape=(3, 4))\n >>> A / 2\n SparseMatrix(indices=tensor([[1, 0, 2],\n [0, 3, 2]]),\n values=tensor([0.5000, 1.0000, 1.5000]),\n shape=(3, 4), nnz=3)\n \"\"\"\n if is_scalar(B):\n return val_like(A, A.val / B)\n if A.is_diag() and B.is_diag():\n assert A.shape == B.shape, (\n f\"The shape of diagonal matrix A {A.shape} and B {B.shape} must\"\n f\"match for elementwise division.\"\n )\n return diag(A.val / B.val, A.shape)\n # Python falls back to B.__rtruediv__(A) then TypeError when NotImplemented\n # is returned.\n return NotImplemented\n\n\ndef sp_power(A: SparseMatrix, scalar: Scalar) -> SparseMatrix:\n \"\"\"Take the power of each nonzero element and return a sparse matrix with\n the result.\n\n Parameters\n ----------\n A : SparseMatrix\n Sparse matrix\n scalar : float or int\n Exponent\n\n Returns\n -------\n SparseMatrix\n Sparse matrix\n\n Examples\n --------\n >>> indices = torch.tensor([[1, 0, 2], [0, 3, 2]])\n >>> val = torch.tensor([10, 20, 30])\n >>> A = dglsp.spmatrix(indices, val)\n >>> A ** 2\n SparseMatrix(indices=tensor([[1, 0, 2],\n [0, 3, 2]]),\n values=tensor([100, 400, 900]),\n shape=(3, 4), nnz=3)\n \"\"\"\n # Python falls back to scalar.__rpow__ then TypeError when NotImplemented\n # is returned.\n return val_like(A, A.val**scalar) if is_scalar(scalar) else NotImplemented\n\n\nSparseMatrix.__add__ = sp_add\nSparseMatrix.__sub__ = sp_sub\nSparseMatrix.__mul__ = sp_mul\nSparseMatrix.__rmul__ = sp_mul\nSparseMatrix.__truediv__ = sp_div\nSparseMatrix.__pow__ = sp_power\n", "path": "python/dgl/sparse/elementwise_op_sp.py"}]} | 2,956 | 756 |
gh_patches_debug_34411 | rasdani/github-patches | git_diff | ESMCI__cime-3079 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Branch a single instance case from a multi-instance case
When a multi-instance CAM forecast fails, I want to use one of the failed instances
as an exact restart in a single instance case, to speed debugging.
Building it as hybrid or startup won't work because for CAM
those are not exact restarts; they use the CAM initial file.
I first tried building a single instance branch run with the multi-instance
as the refcase. That branched the entire multi-instance run.
Then I tried copying all of the restart files for 1 instance into a new directory,
"Restarts", with instance numbers removed from the file names.
I built the single instance case with Restarts as the RUN_REFDIR.
It built, but when I tried to run it, it complained about a mozart file
already existing in the run directory. I believe that an earlier stage
of the submit process created that file, so I'm stumped about how
to fix this problem. I've played with GET_REFCASE and CONTINUE_RUN
and other things, but have not found the magic combination.
Am I missing something, or is this a new capability that hasn't been implemented?
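
As an aside, the renaming step described above is easy to script; the helper below is purely hypothetical (the `_NNNN` suffix pattern, the glob, and the destination layout are assumptions, not CIME behaviour), and it does not answer the GET_REFCASE/CONTINUE_RUN question:

```python
import re
import shutil
from pathlib import Path

def extract_instance(rundir, dest, inst="0004"):
    """Copy one instance's restart files into `dest`, dropping the instance
    suffix (e.g. `cam_0004` -> `cam`) so a single-instance case can point
    RUN_REFDIR at a flat directory of plainly named files."""
    dest = Path(dest)
    dest.mkdir(parents=True, exist_ok=True)
    for f in Path(rundir).glob(f"*_{inst}.*"):
        shutil.copy2(f, dest / re.sub(rf"_{inst}(?=\.)", "", f.name, count=1))
```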
$CASEROOT = /gpfs/fs1/work/raeder/Exp/Debug_lwdn4
$RUNDIR = /gpfs/fs1/scratch/raeder/Debug_lwdn4/run
$CESMROOT = /glade/work/raeder/Models/cesm2_2_maint-5.6
Thanks
Kevin
</issue>
<code>
[start of scripts/lib/CIME/Servers/wget.py]
1 """
2 WGET Server class. Interact with a server using WGET protocol
3 """
4 # pylint: disable=super-init-not-called
5 from CIME.XML.standard_module_setup import *
6 from CIME.Servers.generic_server import GenericServer
7 logger = logging.getLogger(__name__)
8
9 class WGET(GenericServer):
10 def __init__(self, address, user='', passwd=''):
11 self._args = ''
12 if user:
13 self._args += "--user {} ".format(user)
14 if passwd:
15 self._args += "--password {} ".format(passwd)
16 self._server_loc = address
17
18 err = run_cmd("wget {} --spider {}".format(self._args, address))[0]
19 expect(err == 0,"Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .")
20
21
22 def fileexists(self, rel_path):
23 full_url = os.path.join(self._server_loc, rel_path)
24 stat, out, err = run_cmd("wget {} --spider {}".format(self._args, full_url))
25 if (stat != 0):
26 logging.warning("FAIL: Repo '{}' does not have file '{}'\nReason:{}\n{}\n".format(self._server_loc, full_url, out.encode('utf-8'), err.encode('utf-8')))
27 return False
28 return True
29
30 def getfile(self, rel_path, full_path):
31 full_url = os.path.join(self._server_loc, rel_path)
32 stat, output, errput = \
33 run_cmd("wget {} {} -nc --output-document {}".format(self._args, full_url, full_path))
34 if (stat != 0):
35 logging.warning("wget failed with output: {} and errput {}\n".format(output, errput))
36 # wget puts an empty file if it fails.
37 try:
38 os.remove(full_path)
39 except OSError:
40 pass
41 return False
42 else:
43 logging.info("SUCCESS\n")
44 return True
45
46 def getdirectory(self, rel_path, full_path):
47 full_url = os.path.join(self._server_loc, rel_path)
48 stat, output, errput = \
49 run_cmd("wget {} {} -r -N --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path)
50 logger.debug(output)
51 logger.debug(errput)
52 if (stat != 0):
53 logging.warning("wget failed with output: {} and errput {}\n".format(output, errput))
54 # wget puts an empty file if it fails.
55 try:
56 os.remove(full_path)
57 except OSError:
58 pass
59 return False
60 else:
61 logging.info("SUCCESS\n")
62 return True
63
[end of scripts/lib/CIME/Servers/wget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/CIME/Servers/wget.py b/scripts/lib/CIME/Servers/wget.py
--- a/scripts/lib/CIME/Servers/wget.py
+++ b/scripts/lib/CIME/Servers/wget.py
@@ -15,9 +15,9 @@
self._args += "--password {} ".format(passwd)
self._server_loc = address
- err = run_cmd("wget {} --spider {}".format(self._args, address))[0]
- expect(err == 0,"Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .")
-
+ cmd = "wget {} --no-check-certificate --spider {}".format(self._args, address)
+ err, output, _ = run_cmd(cmd, combine_output=True)
+ expect(err == 0,"Could not connect to repo via '{}'\nThis is most likely either a proxy, or network issue.\nOutput:\n{}".format(cmd, output.encode('utf-8')))
def fileexists(self, rel_path):
full_url = os.path.join(self._server_loc, rel_path)
@@ -30,9 +30,9 @@
def getfile(self, rel_path, full_path):
full_url = os.path.join(self._server_loc, rel_path)
stat, output, errput = \
- run_cmd("wget {} {} -nc --output-document {}".format(self._args, full_url, full_path))
+ run_cmd("wget {} {} -nc --no-check-certificate --output-document {}".format(self._args, full_url, full_path))
if (stat != 0):
- logging.warning("wget failed with output: {} and errput {}\n".format(output, errput))
+ logging.warning("wget failed with output: {} and errput {}\n".format(output.encode('utf-8'), errput.encode('utf-8')))
# wget puts an empty file if it fails.
try:
os.remove(full_path)
@@ -46,7 +46,7 @@
def getdirectory(self, rel_path, full_path):
full_url = os.path.join(self._server_loc, rel_path)
stat, output, errput = \
- run_cmd("wget {} {} -r -N --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path)
+ run_cmd("wget {} {} -r -N --no-check-certificate --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path)
logger.debug(output)
logger.debug(errput)
if (stat != 0):
| {"golden_diff": "diff --git a/scripts/lib/CIME/Servers/wget.py b/scripts/lib/CIME/Servers/wget.py\n--- a/scripts/lib/CIME/Servers/wget.py\n+++ b/scripts/lib/CIME/Servers/wget.py\n@@ -15,9 +15,9 @@\n self._args += \"--password {} \".format(passwd)\n self._server_loc = address\n \n- err = run_cmd(\"wget {} --spider {}\".format(self._args, address))[0]\n- expect(err == 0,\"Could not connect to repo '{0}'\\nThis is most likely either a proxy, or network issue .\")\n-\n+ cmd = \"wget {} --no-check-certificate --spider {}\".format(self._args, address)\n+ err, output, _ = run_cmd(cmd, combine_output=True)\n+ expect(err == 0,\"Could not connect to repo via '{}'\\nThis is most likely either a proxy, or network issue.\\nOutput:\\n{}\".format(cmd, output.encode('utf-8')))\n \n def fileexists(self, rel_path):\n full_url = os.path.join(self._server_loc, rel_path)\n@@ -30,9 +30,9 @@\n def getfile(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n- run_cmd(\"wget {} {} -nc --output-document {}\".format(self._args, full_url, full_path))\n+ run_cmd(\"wget {} {} -nc --no-check-certificate --output-document {}\".format(self._args, full_url, full_path))\n if (stat != 0):\n- logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n+ logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output.encode('utf-8'), errput.encode('utf-8')))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n@@ -46,7 +46,7 @@\n def getdirectory(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n- run_cmd(\"wget {} {} -r -N --no-directories \".format(self._args, full_url+os.sep), from_dir=full_path)\n+ run_cmd(\"wget {} {} -r -N --no-check-certificate --no-directories \".format(self._args, full_url+os.sep), from_dir=full_path)\n logger.debug(output)\n logger.debug(errput)\n if (stat != 0):\n", "issue": "Branch a single instance case from a multi-instance case\nWhen a multi-instance CAM forecast fails, I want to use one of the failed instances\r\nas an exact restart in a single instance case, to speed debugging.\r\nBuilding it as hybrid or startup won't work because for CAM \r\nthose are not exact restarts; they use the CAM initial file\r\nI first tried building a single instance branch run with the multi-instance\r\nas the refcase. That branched the entire multi-instance run.\r\n\r\nThen I tried copying all of the restart files for 1 instance into a new directory, \r\n\"Restarts\", with instance numbers removed from the file names.\r\nI built the single instance case with Restarts as the RUN_REFDIR.\r\nIt built, but when I tried to run it it complained about a mozart file\r\nalready existing in the run directory. I believe that an earlier stage\r\nof the submit process created that file, so I'm stumped about how\r\nto fix this problem. I've played with GET_REFCASE and CONTINUE_RUN\r\nand other things, but have not found the magic combination.\r\nAm I missing something, or is this a new capability that hasn't been implemented?\r\n\r\n$CASEROOT = /gpfs/fs1/work/raeder/Exp/Debug_lwdn4\r\n$RUNDIR = /gpfs/fs1/scratch/raeder/Debug_lwdn4/run\r\n$CESMROOT = /glade/work/raeder/Models/cesm2_2_maint-5.6\r\n\r\nThanks\r\nKevin\r\n\n", "before_files": [{"content": "\"\"\"\nWGET Server class. 
Interact with a server using WGET protocol\n\"\"\"\n# pylint: disable=super-init-not-called\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.Servers.generic_server import GenericServer\nlogger = logging.getLogger(__name__)\n\nclass WGET(GenericServer):\n def __init__(self, address, user='', passwd=''):\n self._args = ''\n if user:\n self._args += \"--user {} \".format(user)\n if passwd:\n self._args += \"--password {} \".format(passwd)\n self._server_loc = address\n\n err = run_cmd(\"wget {} --spider {}\".format(self._args, address))[0]\n expect(err == 0,\"Could not connect to repo '{0}'\\nThis is most likely either a proxy, or network issue .\")\n\n\n def fileexists(self, rel_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, out, err = run_cmd(\"wget {} --spider {}\".format(self._args, full_url))\n if (stat != 0):\n logging.warning(\"FAIL: Repo '{}' does not have file '{}'\\nReason:{}\\n{}\\n\".format(self._server_loc, full_url, out.encode('utf-8'), err.encode('utf-8')))\n return False\n return True\n\n def getfile(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n run_cmd(\"wget {} {} -nc --output-document {}\".format(self._args, full_url, full_path))\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n except OSError:\n pass\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n\n def getdirectory(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n run_cmd(\"wget {} {} -r -N --no-directories \".format(self._args, full_url+os.sep), from_dir=full_path)\n logger.debug(output)\n logger.debug(errput)\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n except OSError:\n pass\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n", "path": "scripts/lib/CIME/Servers/wget.py"}]} | 1,560 | 572 |
gh_patches_debug_14006 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3341 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider northern_california_breweries is broken
During the global build at 2021-06-23-14-42-18, spider **northern_california_breweries** failed with **0 features** and **1 error**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/northern_california_breweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/northern_california_breweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/northern_california_breweries.geojson))
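
Judging from the fix recorded further down, the crash is the usual one for this scrape: `float(None)` raises a `TypeError` when a brewery entry carries no coordinates. A sketch of the same guard the fix applies:

```python
# Convert only when the field is present; otherwise leave the coordinate as None.
latitude = float(item["Latitude"]) if item.get("Latitude") is not None else None
longitude = float(item["Longitude"]) if item.get("Longitude") is not None else None
```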
</issue>
<code>
[start of locations/spiders/northern_california_breweries.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import json
5 import re
6
7 class NorthernCaliforniaBreweriesSpider(scrapy.Spider):
8 name = "northern_california_breweries"
9 allowed_domains = ["projects.sfchronicle.com"]
10 start_urls = (
11 'http://projects.sfchronicle.com/2017/brewery-map/',
12 )
13
14 def parse(self, response):
15 beerData = response.xpath("//*[text()[contains(.,'beerData')]]").extract_first()
16 matches = re.search(r"var beerData = (\[(.*)\])", beerData)
17 jsonData = matches.group(0).replace("var beerData = ","")
18 breweryList = json.loads(jsonData)
19
20 for item in breweryList:
21 yield GeojsonPointItem(
22 ref=item.get('Brewery'),
23 lat=float(item.get('Latitude')),
24 lon=float(item.get('Longitude')),
25 addr_full=item.get('Address'),
26 city=item.get('City'),
27 state="CA",
28 website=item.get('Website'),
29 )
30
[end of locations/spiders/northern_california_breweries.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/northern_california_breweries.py b/locations/spiders/northern_california_breweries.py
--- a/locations/spiders/northern_california_breweries.py
+++ b/locations/spiders/northern_california_breweries.py
@@ -18,10 +18,19 @@
breweryList = json.loads(jsonData)
for item in breweryList:
+ latitude = None
+ longitude = None
+
+ if item.get('Latitude') is not None:
+ latitude = float(item.get('Latitude'))
+
+ if item.get('Longitude') is not None:
+ longitude = float(item.get('Longitude'))
+
yield GeojsonPointItem(
ref=item.get('Brewery'),
- lat=float(item.get('Latitude')),
- lon=float(item.get('Longitude')),
+ lat=latitude,
+ lon=longitude,
addr_full=item.get('Address'),
city=item.get('City'),
state="CA",
| {"golden_diff": "diff --git a/locations/spiders/northern_california_breweries.py b/locations/spiders/northern_california_breweries.py\n--- a/locations/spiders/northern_california_breweries.py\n+++ b/locations/spiders/northern_california_breweries.py\n@@ -18,10 +18,19 @@\n breweryList = json.loads(jsonData)\n \n for item in breweryList:\n+ latitude = None\n+ longitude = None\n+\n+ if item.get('Latitude') is not None:\n+ latitude = float(item.get('Latitude'))\n+\n+ if item.get('Longitude') is not None:\n+ longitude = float(item.get('Longitude'))\n+ \n yield GeojsonPointItem(\n ref=item.get('Brewery'),\n- lat=float(item.get('Latitude')),\n- lon=float(item.get('Longitude')),\n+ lat=latitude,\n+ lon=longitude,\n addr_full=item.get('Address'),\n city=item.get('City'),\n state=\"CA\",\n", "issue": "Spider northern_california_breweries is broken\nDuring the global build at 2021-06-23-14-42-18, spider **northern_california_breweries** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/northern_california_breweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/northern_california_breweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/northern_california_breweries.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\nimport re\n\nclass NorthernCaliforniaBreweriesSpider(scrapy.Spider):\n name = \"northern_california_breweries\"\n allowed_domains = [\"projects.sfchronicle.com\"]\n start_urls = (\n 'http://projects.sfchronicle.com/2017/brewery-map/',\n )\n\n def parse(self, response):\n beerData = response.xpath(\"//*[text()[contains(.,'beerData')]]\").extract_first()\n matches = re.search(r\"var beerData = (\\[(.*)\\])\", beerData)\n jsonData = matches.group(0).replace(\"var beerData = \",\"\")\n breweryList = json.loads(jsonData)\n\n for item in breweryList:\n yield GeojsonPointItem(\n ref=item.get('Brewery'),\n lat=float(item.get('Latitude')),\n lon=float(item.get('Longitude')),\n addr_full=item.get('Address'),\n city=item.get('City'),\n state=\"CA\",\n website=item.get('Website'),\n )\n", "path": "locations/spiders/northern_california_breweries.py"}]} | 1,039 | 221 |
gh_patches_debug_59836 | rasdani/github-patches | git_diff | angr__angr-4105 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicate member docs on subclasses
### Description
E.g. the documentation on SimCC's members is also present on SimCCUsercall. This is a huge problem: the API docs page is already enormous, and this duplication makes it multiplicatively bigger.
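
Concretely, the duplication comes from the global `inherited-members` entry in `autodoc_default_options` in `docs/conf.py`: with it set, autodoc re-documents every base-class member on each subclass page. Dropping that key from the defaults (which is what the fix below does) documents inherited members only where a directive explicitly asks for them:

```python
autodoc_default_options = {
    "members": True,
    "member-order": "bysource",
    "show-inheritance": True,
    "special-members": "__init__",
    "undoc-members": True,
}
```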
### Steps to reproduce the bug
_No response_
### Environment
_No response_
### Additional context
_No response_
</issue>
<code>
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # For the full list of built-in configuration values, see the documentation:
4 # https://www.sphinx-doc.org/en/master/usage/configuration.html
5
6 import datetime
7
8 # -- Project information -----------------------------------------------------
9 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
10
11 project = "angr"
12 project_copyright = f"{datetime.datetime.now().year}, The angr Project contributors"
13 author = "The angr Project"
14
15 # -- General configuration ---------------------------------------------------
16 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
17
18 extensions = [
19 "sphinx.ext.autodoc",
20 "sphinx.ext.autosectionlabel",
21 "sphinx.ext.autosummary",
22 "sphinx.ext.coverage",
23 "sphinx.ext.intersphinx",
24 "sphinx.ext.napoleon",
25 "sphinx.ext.todo",
26 "sphinx.ext.viewcode",
27 "sphinx_autodoc_typehints",
28 "myst_parser",
29 ]
30
31 templates_path = ["_templates"]
32 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
33
34 # -- Options for autodoc -----------------------------------------------------
35 # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration
36 autoclass_content = "class"
37 autodoc_default_options = {
38 "members": True,
39 "member-order": "bysource",
40 "inherited-members": True,
41 "show-inheritance": True,
42 "special-members": "__init__",
43 "undoc-members": True,
44 }
45 autodoc_inherit_docstrings = True
46 autodoc_typehints = "both"
47
48 # -- Options for coverage ----------------------------------------------------
49 # https://www.sphinx-doc.org/en/master/usage/extensions/coverage.html
50 coverage_write_headline = False
51
52 coverage_ignore_pyobjects = [
53 "angr.analyses.decompiler.structured_codegen.c.StructuredCodeGenerator", # Alias to CStructuredCodeGenerator
54 "angr.sim_type.SimTypeFixedSizeArray", # Alias to SimTypeArray
55 ]
56
57 # -- Options for intersphinx -------------------------------------------------
58 # https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
59 intersphinx_mapping = {
60 "python": ("https://docs.python.org/3", None),
61 "ailment": ("https://docs.angr.io/projects/ailment/en/latest/", None),
62 "archinfo": ("https://docs.angr.io/projects/archinfo/en/latest/", None),
63 "claripy": ("https://docs.angr.io/projects/claripy/en/latest/", None),
64 "cle": ("https://docs.angr.io/projects/cle/en/latest/", None),
65 "pypcode": ("https://docs.angr.io/projects/pypcode/en/latest/", None),
66 "pyvex": ("https://docs.angr.io/projects/pyvex/en/latest/", None),
67 }
68
69 # -- Options for todos -------------------------------------------------------
70 # https://www.sphinx-doc.org/en/master/usage/extensions/todo.html
71 todo_include_todos = True
72
73 # -- Options for HTML output -------------------------------------------------
74 # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
75
76 html_theme = "furo"
77 html_static_path = ["_static"]
78
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -37,7 +37,6 @@
autodoc_default_options = {
"members": True,
"member-order": "bysource",
- "inherited-members": True,
"show-inheritance": True,
"special-members": "__init__",
"undoc-members": True,
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -37,7 +37,6 @@\n autodoc_default_options = {\n \"members\": True,\n \"member-order\": \"bysource\",\n- \"inherited-members\": True,\n \"show-inheritance\": True,\n \"special-members\": \"__init__\",\n \"undoc-members\": True,\n", "issue": "Duplicate member docs on subclasses\n### Description\n\ne.g. the documentation on SimCC's members is also present on SimCCUsercall. This is a huge problem considering that the api docs page is already fucking gigantic, this is just making it multiplicatively bigger.\n\n### Steps to reproduce the bug\n\n_No response_\n\n### Environment\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\nimport datetime\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"angr\"\nproject_copyright = f\"{datetime.datetime.now().year}, The angr Project contributors\"\nauthor = \"The angr Project\"\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx_autodoc_typehints\",\n \"myst_parser\",\n]\n\ntemplates_path = [\"_templates\"]\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# -- Options for autodoc -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration\nautoclass_content = \"class\"\nautodoc_default_options = {\n \"members\": True,\n \"member-order\": \"bysource\",\n \"inherited-members\": True,\n \"show-inheritance\": True,\n \"special-members\": \"__init__\",\n \"undoc-members\": True,\n}\nautodoc_inherit_docstrings = True\nautodoc_typehints = \"both\"\n\n# -- Options for coverage ----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/coverage.html\ncoverage_write_headline = False\n\ncoverage_ignore_pyobjects = [\n \"angr.analyses.decompiler.structured_codegen.c.StructuredCodeGenerator\", # Alias to CStructuredCodeGenerator\n \"angr.sim_type.SimTypeFixedSizeArray\", # Alias to SimTypeArray\n]\n\n# -- Options for intersphinx -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"ailment\": (\"https://docs.angr.io/projects/ailment/en/latest/\", None),\n \"archinfo\": (\"https://docs.angr.io/projects/archinfo/en/latest/\", None),\n \"claripy\": (\"https://docs.angr.io/projects/claripy/en/latest/\", None),\n \"cle\": (\"https://docs.angr.io/projects/cle/en/latest/\", None),\n \"pypcode\": (\"https://docs.angr.io/projects/pypcode/en/latest/\", None),\n \"pyvex\": (\"https://docs.angr.io/projects/pyvex/en/latest/\", None),\n}\n\n# -- Options for todos -------------------------------------------------------\n# 
https://www.sphinx-doc.org/en/master/usage/extensions/todo.html\ntodo_include_todos = True\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = \"furo\"\nhtml_static_path = [\"_static\"]\n", "path": "docs/conf.py"}]} | 1,453 | 93 |
gh_patches_debug_40226 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3113 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
</issue>
<code>
[start of applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py]
1 from typing import Optional
2
3 import torch.nn as nn
4 from transformers.models.gpt2.configuration_gpt2 import GPT2Config
5 from transformers.models.gpt2.modeling_gpt2 import GPT2Model
6
7 from ..base import Critic
8
9
10 class GPTCritic(Critic):
11 """
12 GPT Critic model.
13
14 Args:
15 pretrained (str): Pretrained model name or path.
16 config (GPT2Config): Model config.
17 checkpoint (bool): Enable gradient checkpointing.
18 """
19
20 def __init__(self,
21 pretrained: Optional[str] = None,
22 config: Optional[GPT2Config] = None,
23 checkpoint: bool = False,
24 **kwargs) -> None:
25 if pretrained is not None:
26 model = GPT2Model.from_pretrained(pretrained)
27 elif config is not None:
28 model = GPT2Model(config)
29 else:
30 model = GPT2Model(GPT2Config())
31 if checkpoint:
32 model.gradient_checkpointing_enable()
33 value_head = nn.Linear(model.config.n_embd, 1)
34 super().__init__(model, value_head, **kwargs)
35
[end of applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py]
[start of applications/ChatGPT/chatgpt/models/opt/opt_critic.py]
1 from typing import Optional
2
3 import torch.nn as nn
4 from transformers.models.opt.configuration_opt import OPTConfig
5 from transformers.models.opt.modeling_opt import OPTModel
6
7 from ..base import Critic
8
9
10 class OPTCritic(Critic):
11 """
12 OPT Critic model.
13
14 Args:
15 pretrained (str): Pretrained model name or path.
16 config (OPTConfig): Model config.
17 checkpoint (bool): Enable gradient checkpointing.
18 lora_rank (int): Rank of the low-rank approximation.
19 lora_train_bias (str): LoRA bias training mode.
20 """
21
22 def __init__(self,
23 pretrained: Optional[str] = None,
24 config: Optional[OPTConfig] = None,
25 checkpoint: bool = False,
26 lora_rank: int = 0,
27 lora_train_bias: str = 'none',
28 **kwargs) -> None:
29 if pretrained is not None:
30 model = OPTModel.from_pretrained(pretrained)
31 elif config is not None:
32 model = OPTModel(config)
33 else:
34 model = OPTModel(OPTConfig())
35 if checkpoint:
36 model.gradient_checkpointing_enable()
37 value_head = nn.Linear(model.config.hidden_size, 1)
38 super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
39
[end of applications/ChatGPT/chatgpt/models/opt/opt_critic.py]
[start of applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py]
1 from typing import Optional
2
3 from transformers.models.gpt2.configuration_gpt2 import GPT2Config
4 from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
5
6 from ..base import Actor
7
8
9 class GPTActor(Actor):
10 """
11 GPT Actor model.
12
13 Args:
14 pretrained (str): Pretrained model name or path.
15 config (GPT2Config): Model config.
16 checkpoint (bool): Enable gradient checkpointing.
17 """
18
19 def __init__(self,
20 pretrained: Optional[str] = None,
21 config: Optional[GPT2Config] = None,
22 checkpoint: bool = False) -> None:
23 if pretrained is not None:
24 model = GPT2LMHeadModel.from_pretrained(pretrained)
25 elif config is not None:
26 model = GPT2LMHeadModel(config)
27 else:
28 model = GPT2LMHeadModel(GPT2Config())
29 if checkpoint:
30 model.gradient_checkpointing_enable()
31 super().__init__(model)
32
[end of applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py
--- a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py
+++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py
@@ -14,12 +14,16 @@
pretrained (str): Pretrained model name or path.
config (GPT2Config): Model config.
checkpoint (bool): Enable gradient checkpointing.
+ lora_rank (int): Rank of the LoRa layer.
+ lora_train_bias (str): Bias training strategy for the LoRa layer.
"""
def __init__(self,
pretrained: Optional[str] = None,
config: Optional[GPT2Config] = None,
- checkpoint: bool = False) -> None:
+ checkpoint: bool = False,
+ lora_rank: int = 0,
+ lora_train_bias: str = 'none') -> None:
if pretrained is not None:
model = GPT2LMHeadModel.from_pretrained(pretrained)
elif config is not None:
@@ -28,4 +32,4 @@
model = GPT2LMHeadModel(GPT2Config())
if checkpoint:
model.gradient_checkpointing_enable()
- super().__init__(model)
+ super().__init__(model, lora_rank, lora_train_bias)
diff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py
--- a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py
+++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py
@@ -15,13 +15,16 @@
pretrained (str): Pretrained model name or path.
config (GPT2Config): Model config.
checkpoint (bool): Enable gradient checkpointing.
+ lora_rank (int): Rank of the LO-RA decomposition.
+ lora_train_bias (str): LoRA bias training mode.
"""
def __init__(self,
pretrained: Optional[str] = None,
config: Optional[GPT2Config] = None,
checkpoint: bool = False,
- **kwargs) -> None:
+ lora_rank: int = 0,
+ lora_train_bias: str = 'none') -> None:
if pretrained is not None:
model = GPT2Model.from_pretrained(pretrained)
elif config is not None:
@@ -31,4 +34,4 @@
if checkpoint:
model.gradient_checkpointing_enable()
value_head = nn.Linear(model.config.n_embd, 1)
- super().__init__(model, value_head, **kwargs)
+ super().__init__(model, value_head, lora_rank, lora_train_bias)
diff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
--- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
+++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
@@ -34,5 +34,5 @@
model = OPTModel(OPTConfig())
if checkpoint:
model.gradient_checkpointing_enable()
- value_head = nn.Linear(model.config.hidden_size, 1)
+ value_head = nn.Linear(model.config.word_embed_proj_dim, 1)
super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
| {"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py\n--- a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py\n+++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py\n@@ -14,12 +14,16 @@\n pretrained (str): Pretrained model name or path.\n config (GPT2Config): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n+ lora_rank (int): Rank of the LoRa layer.\n+ lora_train_bias (str): Bias training strategy for the LoRa layer.\n \"\"\"\n \n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[GPT2Config] = None,\n- checkpoint: bool = False) -> None:\n+ checkpoint: bool = False,\n+ lora_rank: int = 0,\n+ lora_train_bias: str = 'none') -> None:\n if pretrained is not None:\n model = GPT2LMHeadModel.from_pretrained(pretrained)\n elif config is not None:\n@@ -28,4 +32,4 @@\n model = GPT2LMHeadModel(GPT2Config())\n if checkpoint:\n model.gradient_checkpointing_enable()\n- super().__init__(model)\n+ super().__init__(model, lora_rank, lora_train_bias)\ndiff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py\n--- a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py\n+++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py\n@@ -15,13 +15,16 @@\n pretrained (str): Pretrained model name or path.\n config (GPT2Config): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n+ lora_rank (int): Rank of the LO-RA decomposition.\n+ lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n \n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[GPT2Config] = None,\n checkpoint: bool = False,\n- **kwargs) -> None:\n+ lora_rank: int = 0,\n+ lora_train_bias: str = 'none') -> None:\n if pretrained is not None:\n model = GPT2Model.from_pretrained(pretrained)\n elif config is not None:\n@@ -31,4 +34,4 @@\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.n_embd, 1)\n- super().__init__(model, value_head, **kwargs)\n+ super().__init__(model, value_head, lora_rank, lora_train_bias)\ndiff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n--- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n+++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n@@ -34,5 +34,5 @@\n model = OPTModel(OPTConfig())\n if checkpoint:\n model.gradient_checkpointing_enable()\n- value_head = nn.Linear(model.config.hidden_size, 1)\n+ value_head = nn.Linear(model.config.word_embed_proj_dim, 1)\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch.nn as nn\nfrom transformers.models.gpt2.configuration_gpt2 import GPT2Config\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Model\n\nfrom ..base import Critic\n\n\nclass GPTCritic(Critic):\n \"\"\"\n GPT Critic model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (GPT2Config): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[GPT2Config] = None,\n checkpoint: bool = False,\n **kwargs) -> None:\n if pretrained is not None:\n model = GPT2Model.from_pretrained(pretrained)\n elif config is not None:\n model = 
GPT2Model(config)\n else:\n model = GPT2Model(GPT2Config())\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.n_embd, 1)\n super().__init__(model, value_head, **kwargs)\n", "path": "applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py"}, {"content": "from typing import Optional\n\nimport torch.nn as nn\nfrom transformers.models.opt.configuration_opt import OPTConfig\nfrom transformers.models.opt.modeling_opt import OPTModel\n\nfrom ..base import Critic\n\n\nclass OPTCritic(Critic):\n \"\"\"\n OPT Critic model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (OPTConfig): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n lora_rank (int): Rank of the low-rank approximation.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[OPTConfig] = None,\n checkpoint: bool = False,\n lora_rank: int = 0,\n lora_train_bias: str = 'none',\n **kwargs) -> None:\n if pretrained is not None:\n model = OPTModel.from_pretrained(pretrained)\n elif config is not None:\n model = OPTModel(config)\n else:\n model = OPTModel(OPTConfig())\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.hidden_size, 1)\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\n", "path": "applications/ChatGPT/chatgpt/models/opt/opt_critic.py"}, {"content": "from typing import Optional\n\nfrom transformers.models.gpt2.configuration_gpt2 import GPT2Config\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel\n\nfrom ..base import Actor\n\n\nclass GPTActor(Actor):\n \"\"\"\n GPT Actor model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (GPT2Config): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[GPT2Config] = None,\n checkpoint: bool = False) -> None:\n if pretrained is not None:\n model = GPT2LMHeadModel.from_pretrained(pretrained)\n elif config is not None:\n model = GPT2LMHeadModel(config)\n else:\n model = GPT2LMHeadModel(GPT2Config())\n if checkpoint:\n model.gradient_checkpointing_enable()\n super().__init__(model)\n", "path": "applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py"}]} | 1,578 | 810 |
gh_patches_debug_10701 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1360 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
skimage.novice not handling alpha values (or maybe something worse)
We'd like to use `skimage.novice` to teach image manipulation in Software Carpentry, but it looks like `skimage.novice` isn't handling alpha values correctly (though the problem may be deeper).
The test image is a shrunken survey map of Antarctica:

Step 1: load and display using ipython 2.2.0 via conda 3.7.3 on Mac OS X 10.10.1:
```
In [1]: from skimage import novice
In [2]: p1 = novice.open('ant.jpg')
In [3]: p1.show()
```

That's not right...
Step 2: paint the lower left green:
```
In [4]: p1.size
Out[4]: (120, 121)
In [5]: p1[0:60, 0:60] = (0, 255, 0)
In [6]: p1.show()
```

Looks like alpha blending is going on.
Step 3: create a new blank white canvas:
```
In [7]: p2 = novice.Picture.from_size((200, 200), (255, 255, 255))
In [8]: p2.show()
```

That looks OK, but now color part of it green:
```
In [9]: p2[0:60, 0:60] = (0, 255, 0)
In [10]: p2.show()
```

and the green doesn't show up at all.
</issue>
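A minimal diagnostic sketch (assuming the same `ant.jpg` from the session above — the snippet and its expected output are illustrative, not taken from the report): JPEG files carry no alpha channel, so a colour shift like this usually points to the file being stored in a non-RGB mode such as CMYK and being read with the wrong channel interpretation.

```
from PIL import Image

# Sketch only: inspect the mode PIL reports for the problem file.
# A value such as 'CMYK' (rather than 'RGB') would explain the colour shift,
# since the four channels get misread when converted straight to an array.
im = Image.open('ant.jpg')
print(im.mode)

# Converting explicitly to RGB before handing the data to skimage is a
# possible workaround while the reader itself is being fixed.
rgb = im.convert('RGB')
```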
<code>
[start of skimage/io/_plugins/pil_plugin.py]
1 __all__ = ['imread', 'imsave']
2
3 import numpy as np
4 from six import string_types
5 from PIL import Image
6
7 from ...util import img_as_ubyte, img_as_uint
8 from ...external.tifffile import imread as tif_imread, imsave as tif_imsave
9
10
11 def imread(fname, dtype=None, img_num=None, **kwargs):
12 """Load an image from file.
13
14 Parameters
15 ----------
16 fname : str
17 File name.
18 dtype : numpy dtype object or string specifier
19 Specifies data type of array elements.
20 img_num : int, optional
21 Specifies which image to read in a file with multiple images
22 (zero-indexed).
23 kwargs : keyword pairs, optional
24 Addition keyword arguments to pass through (only applicable to Tiff
25 files for now, see `tifffile`'s `imread` function).
26
27 Notes
28 -----
29 Tiff files are handled by Christophe Golhke's tifffile.py [1]_, and support many
30 advanced image types including multi-page and floating point.
31
32 All other files are read using the Python Imaging Libary.
33 See PIL docs [2]_ for a list of supported formats.
34
35 References
36 ----------
37 .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
38 .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
39
40 """
41 if hasattr(fname, 'lower') and dtype is None:
42 kwargs.setdefault('key', img_num)
43 if fname.lower().endswith(('.tiff', '.tif')):
44 return tif_imread(fname, **kwargs)
45
46 im = Image.open(fname)
47 try:
48 # this will raise an IOError if the file is not readable
49 im.getdata()[0]
50 except IOError:
51 site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
52 raise ValueError('Could not load "%s"\nPlease see documentation at: %s' % (fname, site))
53 else:
54 return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
55
56
57 def pil_to_ndarray(im, dtype=None, img_num=None):
58 """Import a PIL Image object to an ndarray, in memory.
59
60 Parameters
61 ----------
62 Refer to ``imread``.
63
64 """
65 frames = []
66 grayscale = None
67 i = 0
68 while 1:
69 try:
70 im.seek(i)
71 except EOFError:
72 break
73
74 frame = im
75
76 if not img_num is None and img_num != i:
77 im.getdata()[0]
78 i += 1
79 continue
80
81 if im.mode == 'P':
82 if grayscale is None:
83 grayscale = _palette_is_grayscale(im)
84
85 if grayscale:
86 frame = im.convert('L')
87 else:
88 frame = im.convert('RGB')
89
90 elif im.mode == '1':
91 frame = im.convert('L')
92
93 elif 'A' in im.mode:
94 frame = im.convert('RGBA')
95
96
97 if im.mode.startswith('I;16'):
98 shape = im.size
99 dtype = '>u2' if im.mode.endswith('B') else '<u2'
100 if 'S' in im.mode:
101 dtype = dtype.replace('u', 'i')
102 frame = np.fromstring(frame.tobytes(), dtype)
103 frame.shape = shape[::-1]
104
105 else:
106 frame = np.array(frame, dtype=dtype)
107
108 frames.append(frame)
109 i += 1
110
111 if hasattr(im, 'fp') and im.fp:
112 im.fp.close()
113
114 if img_num is None and len(frames) > 1:
115 return np.array(frames)
116 elif frames:
117 return frames[0]
118 elif img_num:
119 raise IndexError('Could not find image #%s' % img_num)
120
121
122 def _palette_is_grayscale(pil_image):
123 """Return True if PIL image in palette mode is grayscale.
124
125 Parameters
126 ----------
127 pil_image : PIL image
128 PIL Image that is in Palette mode.
129
130 Returns
131 -------
132 is_grayscale : bool
133 True if all colors in image palette are gray.
134 """
135 assert pil_image.mode == 'P'
136 # get palette as an array with R, G, B columns
137 palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
138 # Not all palette colors are used; unused colors have junk values.
139 start, stop = pil_image.getextrema()
140 valid_palette = palette[start:stop]
141 # Image is grayscale if channel differences (R - G and G - B)
142 # are all zero.
143 return np.allclose(np.diff(valid_palette), 0)
144
145
146 def ndarray_to_pil(arr, format_str=None):
147 """Export an ndarray to a PIL object.
148
149 Parameters
150 ----------
151 Refer to ``imsave``.
152
153 """
154 if arr.ndim == 3:
155 arr = img_as_ubyte(arr)
156 mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
157
158 elif format_str in ['png', 'PNG']:
159 mode = 'I;16'
160 mode_base = 'I'
161
162 if arr.dtype.kind == 'f':
163 arr = img_as_uint(arr)
164
165 elif arr.max() < 256 and arr.min() >= 0:
166 arr = arr.astype(np.uint8)
167 mode = mode_base = 'L'
168
169 else:
170 arr = img_as_uint(arr)
171
172 else:
173 arr = img_as_ubyte(arr)
174 mode = 'L'
175 mode_base = 'L'
176
177 if arr.ndim == 2:
178 im = Image.new(mode_base, arr.T.shape)
179 try:
180 im.frombytes(arr.tobytes(), 'raw', mode)
181 except AttributeError:
182 im.frombytes(arr.tostring(), 'raw', mode)
183
184 else:
185 try:
186 im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
187 arr.tobytes())
188 except AttributeError:
189 im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
190 arr.tostring())
191 return im
192
193
194 def imsave(fname, arr, format_str=None):
195 """Save an image to disk.
196
197 Parameters
198 ----------
199 fname : str or file-like object
200 Name of destination file.
201 arr : ndarray of uint8 or float
202 Array (image) to save. Arrays of data-type uint8 should have
203 values in [0, 255], whereas floating-point arrays must be
204 in [0, 1].
205 format_str: str
206 Format to save as, this is defaulted to PNG if using a file-like
207 object; this will be derived from the extension if fname is a string
208
209 Notes
210 -----
211 Tiff files are handled by Christophe Golhke's tifffile.py [1]_,
212 and support many advanced image types including multi-page and
213 floating point.
214
215 All other image formats use the Python Imaging Libary.
216 See PIL docs [2]_ for a list of other supported formats.
217 All images besides single channel PNGs are converted using `img_as_uint8`.
218 Single Channel PNGs have the following behavior:
219 - Integer values in [0, 255] and Boolean types -> img_as_uint8
220 - Floating point and other integers -> img_as_uint16
221
222 References
223 ----------
224 .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
225 .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
226 """
227 # default to PNG if file-like object
228 if not isinstance(fname, string_types) and format_str is None:
229 format_str = "PNG"
230 # Check for png in filename
231 if (isinstance(fname, string_types)
232 and fname.lower().endswith(".png")):
233 format_str = "PNG"
234
235 arr = np.asanyarray(arr).squeeze()
236
237 if arr.dtype.kind == 'b':
238 arr = arr.astype(np.uint8)
239
240 use_tif = False
241 if hasattr(fname, 'lower'):
242 if fname.lower().endswith(('.tiff', '.tif')):
243 use_tif = True
244 if not format_str is None:
245 if format_str.lower() in ['tiff', 'tif']:
246 use_tif = True
247
248 if use_tif:
249 tif_imsave(fname, arr)
250 return
251
252 if arr.ndim not in (2, 3):
253 raise ValueError("Invalid shape for image array: %s" % arr.shape)
254
255 if arr.ndim == 3:
256 if arr.shape[2] not in (3, 4):
257 raise ValueError("Invalid number of channels in image array.")
258
259 img = ndarray_to_pil(arr, format_str=format_str)
260 img.save(fname, format=format_str)
261
262
263 def imshow(arr):
264 """Display an image, using PIL's default display command.
265
266 Parameters
267 ----------
268 arr : ndarray
269 Image to display. Images of dtype float are assumed to be in
270 [0, 1]. Images of dtype uint8 are in [0, 255].
271
272 """
273 Image.fromarray(img_as_ubyte(arr)).show()
274
275
276 def _app_show():
277 pass
278
[end of skimage/io/_plugins/pil_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/io/_plugins/pil_plugin.py b/skimage/io/_plugins/pil_plugin.py
--- a/skimage/io/_plugins/pil_plugin.py
+++ b/skimage/io/_plugins/pil_plugin.py
@@ -73,7 +73,7 @@
frame = im
- if not img_num is None and img_num != i:
+ if img_num is not None and img_num != i:
im.getdata()[0]
i += 1
continue
@@ -93,6 +93,8 @@
elif 'A' in im.mode:
frame = im.convert('RGBA')
+ elif im.mode == 'CMYK':
+ frame = im.convert('RGB')
if im.mode.startswith('I;16'):
shape = im.size
| {"golden_diff": "diff --git a/skimage/io/_plugins/pil_plugin.py b/skimage/io/_plugins/pil_plugin.py\n--- a/skimage/io/_plugins/pil_plugin.py\n+++ b/skimage/io/_plugins/pil_plugin.py\n@@ -73,7 +73,7 @@\n \n frame = im\n \n- if not img_num is None and img_num != i:\n+ if img_num is not None and img_num != i:\n im.getdata()[0]\n i += 1\n continue\n@@ -93,6 +93,8 @@\n elif 'A' in im.mode:\n frame = im.convert('RGBA')\n \n+ elif im.mode == 'CMYK':\n+ frame = im.convert('RGB')\n \n if im.mode.startswith('I;16'):\n shape = im.size\n", "issue": "skimage.novice not handling alpha values (or maybe something worse)\nWe'd like to use `skimage.novice` to teach image manipulation in Software Carpentry, but it looks like `skimage.novice` isn't handling alpha values correctly (though the problem may be deeper).\n\nTest image is a shrunken survey map of Antarctic:\n\n\n\nStep 1: load and display using ipython 2.2.0 via conda 3.7.3 on Mac OS X 10.10.1:\n\n```\nIn [1]: from skimage import novice\nIn [2]: p1 = novice.open('ant.jpg')\nIn [3]: p1.show()\n```\n\n\n\nThat's not right...\n\nStep 2: paint the lower left green:\n\n```\nIn [4]: p1.size\nOut[4]: (120, 121)\nIn [5]: p1[0:60, 0:60] = (0, 255, 0)\nIn [6]: p1.show()\n```\n\n\n\nLooks like alpha blending is going on.\n\nStep 3: create a new blank white canvas:\n\n```\nIn [7]: p2 = novice.Picture.from_size((200, 200), (255, 255, 255))\nIn [8]: p2.show()\n```\n\n\n\nThat looks OK, but now color part of it green:\n\n```\nIn [9]: p2[0:60, 0:60] = (0, 255, 0)\nIn [10]: p2.show()\n```\n\n\n\nand the green doesn't show up at all.\n\n", "before_files": [{"content": "__all__ = ['imread', 'imsave']\n\nimport numpy as np\nfrom six import string_types\nfrom PIL import Image\n\nfrom ...util import img_as_ubyte, img_as_uint\nfrom ...external.tifffile import imread as tif_imread, imsave as tif_imsave\n\n\ndef imread(fname, dtype=None, img_num=None, **kwargs):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : str\n File name.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements.\n img_num : int, optional\n Specifies which image to read in a file with multiple images\n (zero-indexed).\n kwargs : keyword pairs, optional\n Addition keyword arguments to pass through (only applicable to Tiff\n files for now, see `tifffile`'s `imread` function).\n\n Notes\n -----\n Tiff files are handled by Christophe Golhke's tifffile.py [1]_, and support many\n advanced image types including multi-page and floating point.\n\n All other files are read using the Python Imaging Libary.\n See PIL docs [2]_ for a list of supported formats.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html\n .. 
[2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n\n \"\"\"\n if hasattr(fname, 'lower') and dtype is None:\n kwargs.setdefault('key', img_num)\n if fname.lower().endswith(('.tiff', '.tif')):\n return tif_imread(fname, **kwargs)\n\n im = Image.open(fname)\n try:\n # this will raise an IOError if the file is not readable\n im.getdata()[0]\n except IOError:\n site = \"http://pillow.readthedocs.org/en/latest/installation.html#external-libraries\"\n raise ValueError('Could not load \"%s\"\\nPlease see documentation at: %s' % (fname, site))\n else:\n return pil_to_ndarray(im, dtype=dtype, img_num=img_num)\n\n\ndef pil_to_ndarray(im, dtype=None, img_num=None):\n \"\"\"Import a PIL Image object to an ndarray, in memory.\n\n Parameters\n ----------\n Refer to ``imread``.\n\n \"\"\"\n frames = []\n grayscale = None\n i = 0\n while 1:\n try:\n im.seek(i)\n except EOFError:\n break\n\n frame = im\n\n if not img_num is None and img_num != i:\n im.getdata()[0]\n i += 1\n continue\n\n if im.mode == 'P':\n if grayscale is None:\n grayscale = _palette_is_grayscale(im)\n\n if grayscale:\n frame = im.convert('L')\n else:\n frame = im.convert('RGB')\n\n elif im.mode == '1':\n frame = im.convert('L')\n\n elif 'A' in im.mode:\n frame = im.convert('RGBA')\n\n\n if im.mode.startswith('I;16'):\n shape = im.size\n dtype = '>u2' if im.mode.endswith('B') else '<u2'\n if 'S' in im.mode:\n dtype = dtype.replace('u', 'i')\n frame = np.fromstring(frame.tobytes(), dtype)\n frame.shape = shape[::-1]\n\n else:\n frame = np.array(frame, dtype=dtype)\n\n frames.append(frame)\n i += 1\n\n if hasattr(im, 'fp') and im.fp:\n im.fp.close()\n\n if img_num is None and len(frames) > 1:\n return np.array(frames)\n elif frames:\n return frames[0]\n elif img_num:\n raise IndexError('Could not find image #%s' % img_num)\n\n\ndef _palette_is_grayscale(pil_image):\n \"\"\"Return True if PIL image in palette mode is grayscale.\n\n Parameters\n ----------\n pil_image : PIL image\n PIL Image that is in Palette mode.\n\n Returns\n -------\n is_grayscale : bool\n True if all colors in image palette are gray.\n \"\"\"\n assert pil_image.mode == 'P'\n # get palette as an array with R, G, B columns\n palette = np.asarray(pil_image.getpalette()).reshape((256, 3))\n # Not all palette colors are used; unused colors have junk values.\n start, stop = pil_image.getextrema()\n valid_palette = palette[start:stop]\n # Image is grayscale if channel differences (R - G and G - B)\n # are all zero.\n return np.allclose(np.diff(valid_palette), 0)\n\n\ndef ndarray_to_pil(arr, format_str=None):\n \"\"\"Export an ndarray to a PIL object.\n\n Parameters\n ----------\n Refer to ``imsave``.\n\n \"\"\"\n if arr.ndim == 3:\n arr = img_as_ubyte(arr)\n mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]\n\n elif format_str in ['png', 'PNG']:\n mode = 'I;16'\n mode_base = 'I'\n\n if arr.dtype.kind == 'f':\n arr = img_as_uint(arr)\n\n elif arr.max() < 256 and arr.min() >= 0:\n arr = arr.astype(np.uint8)\n mode = mode_base = 'L'\n\n else:\n arr = img_as_uint(arr)\n\n else:\n arr = img_as_ubyte(arr)\n mode = 'L'\n mode_base = 'L'\n\n if arr.ndim == 2:\n im = Image.new(mode_base, arr.T.shape)\n try:\n im.frombytes(arr.tobytes(), 'raw', mode)\n except AttributeError:\n im.frombytes(arr.tostring(), 'raw', mode)\n\n else:\n try:\n im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),\n arr.tobytes())\n except AttributeError:\n im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),\n arr.tostring())\n return im\n\n\ndef imsave(fname, arr, 
format_str=None):\n \"\"\"Save an image to disk.\n\n Parameters\n ----------\n fname : str or file-like object\n Name of destination file.\n arr : ndarray of uint8 or float\n Array (image) to save. Arrays of data-type uint8 should have\n values in [0, 255], whereas floating-point arrays must be\n in [0, 1].\n format_str: str\n Format to save as, this is defaulted to PNG if using a file-like\n object; this will be derived from the extension if fname is a string\n\n Notes\n -----\n Tiff files are handled by Christophe Golhke's tifffile.py [1]_,\n and support many advanced image types including multi-page and\n floating point.\n\n All other image formats use the Python Imaging Libary.\n See PIL docs [2]_ for a list of other supported formats.\n All images besides single channel PNGs are converted using `img_as_uint8`.\n Single Channel PNGs have the following behavior:\n - Integer values in [0, 255] and Boolean types -> img_as_uint8\n - Floating point and other integers -> img_as_uint16\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html\n .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n \"\"\"\n # default to PNG if file-like object\n if not isinstance(fname, string_types) and format_str is None:\n format_str = \"PNG\"\n # Check for png in filename\n if (isinstance(fname, string_types)\n and fname.lower().endswith(\".png\")):\n format_str = \"PNG\"\n\n arr = np.asanyarray(arr).squeeze()\n\n if arr.dtype.kind == 'b':\n arr = arr.astype(np.uint8)\n\n use_tif = False\n if hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n use_tif = True\n if not format_str is None:\n if format_str.lower() in ['tiff', 'tif']:\n use_tif = True\n\n if use_tif:\n tif_imsave(fname, arr)\n return\n\n if arr.ndim not in (2, 3):\n raise ValueError(\"Invalid shape for image array: %s\" % arr.shape)\n\n if arr.ndim == 3:\n if arr.shape[2] not in (3, 4):\n raise ValueError(\"Invalid number of channels in image array.\")\n\n img = ndarray_to_pil(arr, format_str=format_str)\n img.save(fname, format=format_str)\n\n\ndef imshow(arr):\n \"\"\"Display an image, using PIL's default display command.\n\n Parameters\n ----------\n arr : ndarray\n Image to display. Images of dtype float are assumed to be in\n [0, 1]. Images of dtype uint8 are in [0, 255].\n\n \"\"\"\n Image.fromarray(img_as_ubyte(arr)).show()\n\n\ndef _app_show():\n pass\n", "path": "skimage/io/_plugins/pil_plugin.py"}]} | 4,053 | 182 |
gh_patches_debug_31808 | rasdani/github-patches | git_diff | GPflow__GPflow-1843 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Anisotropic lengthscales
Currently, by default, all stationary kernels impose a positive transform on the lengthscales Parameter. For AnisotropicStationary kernels, this reduces the viable parameter space by a factor of 2^(D-1), where D is the number of input dimensions. That is acceptable for a 1D problem, but in higher dimensions we would miss out. For example, for the Cosine kernel there is a significant difference between lengthscales of [+1, +1] and [+1, -1]: they correspond to waves propagating in perpendicular directions.
</issue>
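A quick numeric illustration of that last claim, using the Cosine kernel formula implemented further down (k(x, x') = σ² cos(2π Σᵢ (xᵢ − x'ᵢ)/ℓᵢ)); the displacement values here are arbitrary examples, not taken from the issue:

```
import numpy as np

# Sketch of the Cosine kernel from the file below; the inputs are made up.
def cosine_k(x, x2, lengthscales, variance=1.0):
    d = np.sum((np.asarray(x) - np.asarray(x2)) / np.asarray(lengthscales))
    return variance * np.cos(2 * np.pi * d)

x, x2 = [0.25, 0.25], [0.0, 0.0]
print(cosine_k(x, x2, [1.0, 1.0]))   # cos(pi) = -1.0
print(cosine_k(x, x2, [1.0, -1.0]))  # cos(0)  =  1.0, so the sign of a lengthscale matters in 2D
```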
<code>
[start of gpflow/kernels/stationaries.py]
1 # Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, Optional
16
17 import numpy as np
18 import tensorflow as tf
19
20 from ..base import Parameter, TensorType
21 from ..utilities import positive
22 from ..utilities.ops import difference_matrix, square_distance
23 from .base import ActiveDims, Kernel
24
25
26 class Stationary(Kernel):
27 """
28 Base class for kernels that are stationary, that is, they only depend on
29
30 d = x - x'
31
32 This class handles 'ard' behaviour, which stands for 'Automatic Relevance
33 Determination'. This means that the kernel has one lengthscale per
34 dimension, otherwise the kernel is isotropic (has a single lengthscale).
35 """
36
37 def __init__(
38 self, variance: TensorType = 1.0, lengthscales: TensorType = 1.0, **kwargs: Any
39 ) -> None:
40 """
41 :param variance: the (initial) value for the variance parameter.
42 :param lengthscales: the (initial) value for the lengthscale
43 parameter(s), to induce ARD behaviour this must be initialised as
44 an array the same length as the the number of active dimensions
45 e.g. [1., 1., 1.]. If only a single value is passed, this value
46 is used as the lengthscale of each dimension.
47 :param kwargs: accepts `name` and `active_dims`, which is a list or
48 slice of indices which controls which columns of X are used (by
49 default, all columns are used).
50 """
51 for kwarg in kwargs:
52 if kwarg not in {"name", "active_dims"}:
53 raise TypeError(f"Unknown keyword argument: {kwarg}")
54
55 super().__init__(**kwargs)
56 self.variance = Parameter(variance, transform=positive())
57 self.lengthscales = Parameter(lengthscales, transform=positive())
58 self._validate_ard_active_dims(self.lengthscales)
59
60 @property
61 def ard(self) -> bool:
62 """
63 Whether ARD behaviour is active.
64 """
65 ndims: int = self.lengthscales.shape.ndims
66 return ndims > 0
67
68 def scale(self, X: TensorType) -> TensorType:
69 X_scaled = X / self.lengthscales if X is not None else X
70 return X_scaled
71
72 def K_diag(self, X: TensorType) -> tf.Tensor:
73 return tf.fill(tf.shape(X)[:-1], tf.squeeze(self.variance))
74
75
76 class IsotropicStationary(Stationary):
77 """
78 Base class for isotropic stationary kernels, i.e. kernels that only
79 depend on
80
81 r = ‖x - x'‖
82
83 Derived classes should implement one of:
84
85 K_r2(self, r2): Returns the kernel evaluated on r² (r2), which is the
86 squared scaled Euclidean distance Should operate element-wise on r2.
87
88 K_r(self, r): Returns the kernel evaluated on r, which is the scaled
89 Euclidean distance. Should operate element-wise on r.
90 """
91
92 def K(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:
93 r2 = self.scaled_squared_euclid_dist(X, X2)
94 return self.K_r2(r2)
95
96 def K_r2(self, r2: TensorType) -> tf.Tensor:
97 if hasattr(self, "K_r"):
98 # Clipping around the (single) float precision which is ~1e-45.
99 r = tf.sqrt(tf.maximum(r2, 1e-36))
100 return self.K_r(r) # pylint: disable=no-member
101 raise NotImplementedError
102
103 def scaled_squared_euclid_dist(
104 self, X: TensorType, X2: Optional[TensorType] = None
105 ) -> tf.Tensor:
106 """
107 Returns ‖(X - X2ᵀ) / ℓ‖², i.e. the squared L₂-norm.
108 """
109 return square_distance(self.scale(X), self.scale(X2))
110
111
112 class AnisotropicStationary(Stationary):
113 """
114 Base class for anisotropic stationary kernels, i.e. kernels that only
115 depend on
116
117 d = x - x'
118
119 Derived classes should implement K_d(self, d): Returns the kernel evaluated
120 on d, which is the pairwise difference matrix, scaled by the lengthscale
121 parameter ℓ (i.e. [(X - X2ᵀ) / ℓ]). The last axis corresponds to the
122 input dimension.
123 """
124
125 def K(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:
126 return self.K_d(self.scaled_difference_matrix(X, X2))
127
128 def scaled_difference_matrix(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:
129 """
130 Returns [(X - X2ᵀ) / ℓ]. If X has shape [..., N, D] and
131 X2 has shape [..., M, D], the output will have shape [..., N, M, D].
132 """
133 return difference_matrix(self.scale(X), self.scale(X2))
134
135 def K_d(self, d: TensorType) -> tf.Tensor:
136 raise NotImplementedError
137
138
139 class SquaredExponential(IsotropicStationary):
140 """
141 The radial basis function (RBF) or squared exponential kernel. The kernel equation is
142
143 k(r) = σ² exp{-½ r²}
144
145 where:
146 r is the Euclidean distance between the input points, scaled by the lengthscales parameter ℓ.
147 σ² is the variance parameter
148
149 Functions drawn from a GP with this kernel are infinitely differentiable!
150 """
151
152 def K_r2(self, r2: TensorType) -> tf.Tensor:
153 return self.variance * tf.exp(-0.5 * r2)
154
155
156 class RationalQuadratic(IsotropicStationary):
157 """
158 Rational Quadratic kernel,
159
160 k(r) = σ² (1 + r² / 2αℓ²)^(-α)
161
162 σ² : variance
163 ℓ : lengthscales
164 α : alpha, determines relative weighting of small-scale and large-scale fluctuations
165
166 For α → ∞, the RQ kernel becomes equivalent to the squared exponential.
167 """
168
169 def __init__(
170 self,
171 variance: TensorType = 1.0,
172 lengthscales: TensorType = 1.0,
173 alpha: TensorType = 1.0,
174 active_dims: Optional[ActiveDims] = None,
175 ) -> None:
176 super().__init__(variance=variance, lengthscales=lengthscales, active_dims=active_dims)
177 self.alpha = Parameter(alpha, transform=positive())
178
179 def K_r2(self, r2: TensorType) -> tf.Tensor:
180 return self.variance * (1 + 0.5 * r2 / self.alpha) ** (-self.alpha)
181
182
183 class Exponential(IsotropicStationary):
184 """
185 The Exponential kernel. It is equivalent to a Matern12 kernel with doubled lengthscales.
186 """
187
188 def K_r(self, r: TensorType) -> tf.Tensor:
189 return self.variance * tf.exp(-0.5 * r)
190
191
192 class Matern12(IsotropicStationary):
193 """
194 The Matern 1/2 kernel. Functions drawn from a GP with this kernel are not
195 differentiable anywhere. The kernel equation is
196
197 k(r) = σ² exp{-r}
198
199 where:
200 r is the Euclidean distance between the input points, scaled by the lengthscales parameter ℓ.
201 σ² is the variance parameter
202 """
203
204 def K_r(self, r: TensorType) -> tf.Tensor:
205 return self.variance * tf.exp(-r)
206
207
208 class Matern32(IsotropicStationary):
209 """
210 The Matern 3/2 kernel. Functions drawn from a GP with this kernel are once
211 differentiable. The kernel equation is
212
213 k(r) = σ² (1 + √3r) exp{-√3 r}
214
215 where:
216 r is the Euclidean distance between the input points, scaled by the lengthscales parameter ℓ,
217 σ² is the variance parameter.
218 """
219
220 def K_r(self, r: TensorType) -> tf.Tensor:
221 sqrt3 = np.sqrt(3.0)
222 return self.variance * (1.0 + sqrt3 * r) * tf.exp(-sqrt3 * r)
223
224
225 class Matern52(IsotropicStationary):
226 """
227 The Matern 5/2 kernel. Functions drawn from a GP with this kernel are twice
228 differentiable. The kernel equation is
229
230 k(r) = σ² (1 + √5r + 5/3r²) exp{-√5 r}
231
232 where:
233 r is the Euclidean distance between the input points, scaled by the lengthscales parameter ℓ,
234 σ² is the variance parameter.
235 """
236
237 def K_r(self, r: TensorType) -> tf.Tensor:
238 sqrt5 = np.sqrt(5.0)
239 return self.variance * (1.0 + sqrt5 * r + 5.0 / 3.0 * tf.square(r)) * tf.exp(-sqrt5 * r)
240
241
242 class Cosine(AnisotropicStationary):
243 """
244 The Cosine kernel. Functions drawn from a GP with this kernel are sinusoids
245 (with a random phase). The kernel equation is
246
247 k(r) = σ² cos{2πd}
248
249 where:
250 d is the sum of the per-dimension differences between the input points, scaled by the
251 lengthscale parameter ℓ (i.e. Σᵢ [(X - X2ᵀ) / ℓ]ᵢ),
252 σ² is the variance parameter.
253 """
254
255 def K_d(self, d: TensorType) -> tf.Tensor:
256 d = tf.reduce_sum(d, axis=-1)
257 return self.variance * tf.cos(2 * np.pi * d)
258
[end of gpflow/kernels/stationaries.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpflow/kernels/stationaries.py b/gpflow/kernels/stationaries.py
--- a/gpflow/kernels/stationaries.py
+++ b/gpflow/kernels/stationaries.py
@@ -41,7 +41,7 @@
:param variance: the (initial) value for the variance parameter.
:param lengthscales: the (initial) value for the lengthscale
parameter(s), to induce ARD behaviour this must be initialised as
- an array the same length as the the number of active dimensions
+ an array the same length as the number of active dimensions
e.g. [1., 1., 1.]. If only a single value is passed, this value
is used as the lengthscale of each dimension.
:param kwargs: accepts `name` and `active_dims`, which is a list or
@@ -122,6 +122,26 @@
input dimension.
"""
+ def __init__(
+ self, variance: TensorType = 1.0, lengthscales: TensorType = 1.0, **kwargs: Any
+ ) -> None:
+ """
+ :param variance: the (initial) value for the variance parameter.
+ :param lengthscales: the (initial) value for the lengthscale
+ parameter(s), to induce ARD behaviour this must be initialised as
+ an array the same length as the number of active dimensions
+ e.g. [1., 1., 1.]. Note that anisotropic kernels can possess
+ negative lengthscales. If only a single value is passed, this
+ value is used as the lengthscale of each dimension.
+ :param kwargs: accepts `name` and `active_dims`, which is a list or
+ slice of indices which controls which columns of X are used (by
+ default, all columns are used).
+ """
+ super().__init__(variance, lengthscales, **kwargs)
+
+ if self.ard:
+ self.lengthscales = Parameter(self.lengthscales.numpy())
+
def K(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:
return self.K_d(self.scaled_difference_matrix(X, X2))
| {"golden_diff": "diff --git a/gpflow/kernels/stationaries.py b/gpflow/kernels/stationaries.py\n--- a/gpflow/kernels/stationaries.py\n+++ b/gpflow/kernels/stationaries.py\n@@ -41,7 +41,7 @@\n :param variance: the (initial) value for the variance parameter.\n :param lengthscales: the (initial) value for the lengthscale\n parameter(s), to induce ARD behaviour this must be initialised as\n- an array the same length as the the number of active dimensions\n+ an array the same length as the number of active dimensions\n e.g. [1., 1., 1.]. If only a single value is passed, this value\n is used as the lengthscale of each dimension.\n :param kwargs: accepts `name` and `active_dims`, which is a list or\n@@ -122,6 +122,26 @@\n input dimension.\n \"\"\"\n \n+ def __init__(\n+ self, variance: TensorType = 1.0, lengthscales: TensorType = 1.0, **kwargs: Any\n+ ) -> None:\n+ \"\"\"\n+ :param variance: the (initial) value for the variance parameter.\n+ :param lengthscales: the (initial) value for the lengthscale\n+ parameter(s), to induce ARD behaviour this must be initialised as\n+ an array the same length as the number of active dimensions\n+ e.g. [1., 1., 1.]. Note that anisotropic kernels can possess\n+ negative lengthscales. If only a single value is passed, this\n+ value is used as the lengthscale of each dimension.\n+ :param kwargs: accepts `name` and `active_dims`, which is a list or\n+ slice of indices which controls which columns of X are used (by\n+ default, all columns are used).\n+ \"\"\"\n+ super().__init__(variance, lengthscales, **kwargs)\n+\n+ if self.ard:\n+ self.lengthscales = Parameter(self.lengthscales.numpy())\n+\n def K(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:\n return self.K_d(self.scaled_difference_matrix(X, X2))\n", "issue": "Anisotropic lengthscales\nCurrently, by default, all stationary kernels impose a positive transform on the lengthscales Parameter. For the case of AnisotropicStationary kernels, this reduces the viable parameter space by a factor of 2^(D-1), where D is the number of input dimensions. So it is acceptable in the case of a 1D problem, but in higher dimensions we would miss out. For example, for the Cosine kernel there is a significant difference between lengthscales of [+1, +1], and [+1, -1], they correspond to waves propagating in perpendicular directions. \r\n\n", "before_files": [{"content": "# Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Optional\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ..base import Parameter, TensorType\nfrom ..utilities import positive\nfrom ..utilities.ops import difference_matrix, square_distance\nfrom .base import ActiveDims, Kernel\n\n\nclass Stationary(Kernel):\n \"\"\"\n Base class for kernels that are stationary, that is, they only depend on\n\n d = x - x'\n\n This class handles 'ard' behaviour, which stands for 'Automatic Relevance\n Determination'. 
This means that the kernel has one lengthscale per\n dimension, otherwise the kernel is isotropic (has a single lengthscale).\n \"\"\"\n\n def __init__(\n self, variance: TensorType = 1.0, lengthscales: TensorType = 1.0, **kwargs: Any\n ) -> None:\n \"\"\"\n :param variance: the (initial) value for the variance parameter.\n :param lengthscales: the (initial) value for the lengthscale\n parameter(s), to induce ARD behaviour this must be initialised as\n an array the same length as the the number of active dimensions\n e.g. [1., 1., 1.]. If only a single value is passed, this value\n is used as the lengthscale of each dimension.\n :param kwargs: accepts `name` and `active_dims`, which is a list or\n slice of indices which controls which columns of X are used (by\n default, all columns are used).\n \"\"\"\n for kwarg in kwargs:\n if kwarg not in {\"name\", \"active_dims\"}:\n raise TypeError(f\"Unknown keyword argument: {kwarg}\")\n\n super().__init__(**kwargs)\n self.variance = Parameter(variance, transform=positive())\n self.lengthscales = Parameter(lengthscales, transform=positive())\n self._validate_ard_active_dims(self.lengthscales)\n\n @property\n def ard(self) -> bool:\n \"\"\"\n Whether ARD behaviour is active.\n \"\"\"\n ndims: int = self.lengthscales.shape.ndims\n return ndims > 0\n\n def scale(self, X: TensorType) -> TensorType:\n X_scaled = X / self.lengthscales if X is not None else X\n return X_scaled\n\n def K_diag(self, X: TensorType) -> tf.Tensor:\n return tf.fill(tf.shape(X)[:-1], tf.squeeze(self.variance))\n\n\nclass IsotropicStationary(Stationary):\n \"\"\"\n Base class for isotropic stationary kernels, i.e. kernels that only\n depend on\n\n r = \u2016x - x'\u2016\n\n Derived classes should implement one of:\n\n K_r2(self, r2): Returns the kernel evaluated on r\u00b2 (r2), which is the\n squared scaled Euclidean distance Should operate element-wise on r2.\n\n K_r(self, r): Returns the kernel evaluated on r, which is the scaled\n Euclidean distance. Should operate element-wise on r.\n \"\"\"\n\n def K(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:\n r2 = self.scaled_squared_euclid_dist(X, X2)\n return self.K_r2(r2)\n\n def K_r2(self, r2: TensorType) -> tf.Tensor:\n if hasattr(self, \"K_r\"):\n # Clipping around the (single) float precision which is ~1e-45.\n r = tf.sqrt(tf.maximum(r2, 1e-36))\n return self.K_r(r) # pylint: disable=no-member\n raise NotImplementedError\n\n def scaled_squared_euclid_dist(\n self, X: TensorType, X2: Optional[TensorType] = None\n ) -> tf.Tensor:\n \"\"\"\n Returns \u2016(X - X2\u1d40) / \u2113\u2016\u00b2, i.e. the squared L\u2082-norm.\n \"\"\"\n return square_distance(self.scale(X), self.scale(X2))\n\n\nclass AnisotropicStationary(Stationary):\n \"\"\"\n Base class for anisotropic stationary kernels, i.e. kernels that only\n depend on\n\n d = x - x'\n\n Derived classes should implement K_d(self, d): Returns the kernel evaluated\n on d, which is the pairwise difference matrix, scaled by the lengthscale\n parameter \u2113 (i.e. [(X - X2\u1d40) / \u2113]). The last axis corresponds to the\n input dimension.\n \"\"\"\n\n def K(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:\n return self.K_d(self.scaled_difference_matrix(X, X2))\n\n def scaled_difference_matrix(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:\n \"\"\"\n Returns [(X - X2\u1d40) / \u2113]. 
If X has shape [..., N, D] and\n X2 has shape [..., M, D], the output will have shape [..., N, M, D].\n \"\"\"\n return difference_matrix(self.scale(X), self.scale(X2))\n\n def K_d(self, d: TensorType) -> tf.Tensor:\n raise NotImplementedError\n\n\nclass SquaredExponential(IsotropicStationary):\n \"\"\"\n The radial basis function (RBF) or squared exponential kernel. The kernel equation is\n\n k(r) = \u03c3\u00b2 exp{-\u00bd r\u00b2}\n\n where:\n r is the Euclidean distance between the input points, scaled by the lengthscales parameter \u2113.\n \u03c3\u00b2 is the variance parameter\n\n Functions drawn from a GP with this kernel are infinitely differentiable!\n \"\"\"\n\n def K_r2(self, r2: TensorType) -> tf.Tensor:\n return self.variance * tf.exp(-0.5 * r2)\n\n\nclass RationalQuadratic(IsotropicStationary):\n \"\"\"\n Rational Quadratic kernel,\n\n k(r) = \u03c3\u00b2 (1 + r\u00b2 / 2\u03b1\u2113\u00b2)^(-\u03b1)\n\n \u03c3\u00b2 : variance\n \u2113 : lengthscales\n \u03b1 : alpha, determines relative weighting of small-scale and large-scale fluctuations\n\n For \u03b1 \u2192 \u221e, the RQ kernel becomes equivalent to the squared exponential.\n \"\"\"\n\n def __init__(\n self,\n variance: TensorType = 1.0,\n lengthscales: TensorType = 1.0,\n alpha: TensorType = 1.0,\n active_dims: Optional[ActiveDims] = None,\n ) -> None:\n super().__init__(variance=variance, lengthscales=lengthscales, active_dims=active_dims)\n self.alpha = Parameter(alpha, transform=positive())\n\n def K_r2(self, r2: TensorType) -> tf.Tensor:\n return self.variance * (1 + 0.5 * r2 / self.alpha) ** (-self.alpha)\n\n\nclass Exponential(IsotropicStationary):\n \"\"\"\n The Exponential kernel. It is equivalent to a Matern12 kernel with doubled lengthscales.\n \"\"\"\n\n def K_r(self, r: TensorType) -> tf.Tensor:\n return self.variance * tf.exp(-0.5 * r)\n\n\nclass Matern12(IsotropicStationary):\n \"\"\"\n The Matern 1/2 kernel. Functions drawn from a GP with this kernel are not\n differentiable anywhere. The kernel equation is\n\n k(r) = \u03c3\u00b2 exp{-r}\n\n where:\n r is the Euclidean distance between the input points, scaled by the lengthscales parameter \u2113.\n \u03c3\u00b2 is the variance parameter\n \"\"\"\n\n def K_r(self, r: TensorType) -> tf.Tensor:\n return self.variance * tf.exp(-r)\n\n\nclass Matern32(IsotropicStationary):\n \"\"\"\n The Matern 3/2 kernel. Functions drawn from a GP with this kernel are once\n differentiable. The kernel equation is\n\n k(r) = \u03c3\u00b2 (1 + \u221a3r) exp{-\u221a3 r}\n\n where:\n r is the Euclidean distance between the input points, scaled by the lengthscales parameter \u2113,\n \u03c3\u00b2 is the variance parameter.\n \"\"\"\n\n def K_r(self, r: TensorType) -> tf.Tensor:\n sqrt3 = np.sqrt(3.0)\n return self.variance * (1.0 + sqrt3 * r) * tf.exp(-sqrt3 * r)\n\n\nclass Matern52(IsotropicStationary):\n \"\"\"\n The Matern 5/2 kernel. Functions drawn from a GP with this kernel are twice\n differentiable. The kernel equation is\n\n k(r) = \u03c3\u00b2 (1 + \u221a5r + 5/3r\u00b2) exp{-\u221a5 r}\n\n where:\n r is the Euclidean distance between the input points, scaled by the lengthscales parameter \u2113,\n \u03c3\u00b2 is the variance parameter.\n \"\"\"\n\n def K_r(self, r: TensorType) -> tf.Tensor:\n sqrt5 = np.sqrt(5.0)\n return self.variance * (1.0 + sqrt5 * r + 5.0 / 3.0 * tf.square(r)) * tf.exp(-sqrt5 * r)\n\n\nclass Cosine(AnisotropicStationary):\n \"\"\"\n The Cosine kernel. Functions drawn from a GP with this kernel are sinusoids\n (with a random phase). 
The kernel equation is\n\n k(r) = \u03c3\u00b2 cos{2\u03c0d}\n\n where:\n d is the sum of the per-dimension differences between the input points, scaled by the\n lengthscale parameter \u2113 (i.e. \u03a3\u1d62 [(X - X2\u1d40) / \u2113]\u1d62),\n \u03c3\u00b2 is the variance parameter.\n \"\"\"\n\n def K_d(self, d: TensorType) -> tf.Tensor:\n d = tf.reduce_sum(d, axis=-1)\n return self.variance * tf.cos(2 * np.pi * d)\n", "path": "gpflow/kernels/stationaries.py"}]} | 3,651 | 501 |
gh_patches_debug_35660 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1709 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mismatch for docs in `perplexity`
## 📚 Documentation
I am working on NLP models and find that the implementation of `perplexity` in https://github.com/Lightning-AI/torchmetrics/blob/e4df07109586d1f605e06aed85fbc6794e8ed833/src/torchmetrics/functional/text/perplexity.py#L114-L126 is probably wrong.
Since `_perplexity_update` in https://github.com/Lightning-AI/torchmetrics/blob/e4df07109586d1f605e06aed85fbc6794e8ed833/src/torchmetrics/functional/text/perplexity.py#L84-L88 executes the `softmax`, it indicates that `preds` **should be a logit or an unnormalized score** and definitely NOT a log probability.
I wonder if this is an intentional move or just a simple typo.
I also found a similar fix in issue #1366, which made the mistake. Should I make a PR for this?
</issue>
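A small self-contained check of the distinction at stake (random tensors, independent of the library code): softmax is shift-invariant per row, so logits and log-probabilities both map to the same normalised distribution, whereas already-normalised probabilities passed through another softmax do not.

```
import torch

# Sketch with random data: compare what softmax does to logits, log-probs and probs.
logits = torch.randn(4, 5)
probs = torch.softmax(logits, dim=-1)
log_probs = torch.log_softmax(logits, dim=-1)

print(torch.allclose(torch.softmax(log_probs, dim=-1), probs))  # True: log-probs survive the extra softmax
print(torch.allclose(torch.softmax(probs, dim=-1), probs))      # False: plain probabilities do not
```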
<code>
[start of src/torchmetrics/functional/text/perplexity.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional, Tuple
16
17 import torch
18 from torch import Tensor
19 from torch.nn import functional as F # noqa: N812
20
21 _TORCH_FLOAT_OR_DOUBLE = (torch.float32, torch.float64)
22
23
24 def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:
25 """Check shape and type consistency of input vectors.
26
27 Args:
28 preds:
29 Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].
30 target:
31 Ground truth values with a shape [batch_size, seq_len].
32
33 Raises:
34 ValueError:
35 If ``preds`` tensor has no 3 dimensions.
36 ValueError:
37 If ``target`` tensor has no 2 dimensions.
38 ValueError:
39 If the first two dimensions of ``preds`` and ``target`` do not equal.
40 TypeError:
41 If ``preds`` dtype is not one of ``(torch.float16, torch.float32, torch.float64)``
42 TypeError:
43 If ``target`` is not of a type LongTensor (torch.int64)
44 """
45 if len(preds.shape) != 3:
46 raise ValueError(
47 "Input tensor `preds` is expected to have 3 dimensions, [batch_size, seq_len, vocab_size],"
48 f" but got {len(preds.shape)}."
49 )
50 if len(target.shape) != 2:
51 raise ValueError(
52 "Input tensor `target` is expected to have 2 dimensions, [batch_size, seq_len],"
53 f" but got {len(target.shape)}."
54 )
55 if preds.shape[:2] != target.shape:
56 raise ValueError(
57 "Input tensors `preds` and `target` are expected to have equaling first two dimensions,"
58 f" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}."
59 )
60 if preds.dtype not in _TORCH_FLOAT_OR_DOUBLE:
61 raise TypeError(
62 f"Input tensor `preds` is expected to be of a type one of {_TORCH_FLOAT_OR_DOUBLE} but got {preds.dtype}."
63 )
64 if target.dtype != torch.int64:
65 raise TypeError(f"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.")
66
67
68 def _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tuple[Tensor, Tensor]:
69 """Compute intermediate statistics for Perplexity.
70
71 Args:
72 preds:
73 Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].
74 target:
75 Ground truth values with a shape [batch_size, seq_len].
76 ignore_index:
77 Integer specifying a target class to ignore. If given, this class index does not contribute
78 to the returned score.
79
80 Returns:
81 Log probabilities, summed over all samples
82 Number of samples
83 """
84 _check_shape_and_type_consistency(preds, target)
85
86 probs = F.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)
87 target = target.reshape(-1)
88
89 if ignore_index is not None:
90 mask = target.ne(ignore_index)
91 target = target.where(target != ignore_index, torch.tensor(0, device=target.device))
92 else:
93 mask = torch.ones_like(target, dtype=torch.bool)
94
95 probs = probs[:, target].diagonal()[mask]
96 total_log_probs = -probs.log().sum()
97 count = mask.sum()
98
99 return total_log_probs, count
100
101
102 def _perplexity_compute(total: Tensor, count: Tensor) -> Tensor:
103 """Compute the Perplexity.
104
105 Args:
106 total: Log probabilities, summed over all samples
107 count: Number of samples
108 Returns:
109 Perplexity
110 """
111 return torch.exp(total / count)
112
113
114 def perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:
115 """Perplexity measures how well a language model predicts a text sample.
116
117 This metric is calculated as the average number of bits per word a model needs to represent the sample.
118
119 Args:
120 preds:
121 Log probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].
122 target:
123 Ground truth values with a shape [batch_size, seq_len].
124 ignore_index:
125 Integer specifying a target class to ignore. If given, this class index does not contribute
126 to the returned score.
127
128 Returns:
129 Perplexity value
130
131 Examples:
132 >>> import torch
133 >>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22))
134 >>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22))
135 >>> target[0, 6:] = -100
136 >>> perplexity(preds, target, ignore_index=-100)
137 tensor(5.2545)
138 """
139 total, count = _perplexity_update(preds, target, ignore_index)
140 return _perplexity_compute(total, count)
141
[end of src/torchmetrics/functional/text/perplexity.py]
[start of src/torchmetrics/text/perplexity.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, Dict, Optional, Sequence, Union
16
17 from torch import Tensor, tensor
18
19 from torchmetrics.functional.text.perplexity import _perplexity_compute, _perplexity_update
20 from torchmetrics.metric import Metric
21 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
22 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
23
24 if not _MATPLOTLIB_AVAILABLE:
25 __doctest_skip__ = ["Perplexity.plot"]
26
27
28 class Perplexity(Metric):
29 r"""Perplexity measures how well a language model predicts a text sample.
30
31 It's calculated as the average number of bits per word a model needs to represent the sample.
32
33 As input to ``forward`` and ``update`` the metric accepts the following input:
34
35 - ``preds`` (:class:`~torch.Tensor`): Probabilities assigned to each token in a sequence with shape
36 [batch_size, seq_len, vocab_size]
37 - ``target`` (:class:`~torch.Tensor`): Ground truth values with a shape [batch_size, seq_len]
38
39 As output of ``forward`` and ``compute`` the metric returns the following output:
40
41 - ``perp`` (:class:`~torch.Tensor`): A tensor with the perplexity score
42
43 Args:
44 ignore_index: Integer specifying a target class to ignore.
45 If given, this class index does not contribute to the returned score.
46 kwargs:
47 Additional keyword arguments, see :ref:`Metric kwargs` for more info.
48
49 Examples:
50 >>> from torchmetrics.text import Perplexity
51 >>> import torch
52 >>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22))
53 >>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22))
54 >>> target[0, 6:] = -100
55 >>> perp = Perplexity(ignore_index=-100)
56 >>> perp(preds, target)
57 tensor(5.2545)
58 """
59 is_differentiable = True
60 higher_is_better = False
61 full_state_update = False
62 total_log_probs: Tensor
63 count: Tensor
64
65 def __init__(
66 self,
67 ignore_index: Optional[int] = None,
68 **kwargs: Dict[str, Any],
69 ) -> None:
70 super().__init__(**kwargs)
71 if ignore_index is not None and not isinstance(ignore_index, int):
72 raise ValueError(f"Argument `ignore_index` expected to either be `None` or an `int` but got {ignore_index}")
73 self.ignore_index = ignore_index
74 self.add_state("total_log_probs", default=tensor(0.0), dist_reduce_fx="sum")
75 self.add_state("count", default=tensor(0.0), dist_reduce_fx="sum")
76
77 def update(self, preds: Tensor, target: Tensor) -> None:
78 """Update state with predictions and targets."""
79 total_log_probs, count = _perplexity_update(preds, target, self.ignore_index)
80 self.total_log_probs += total_log_probs
81 self.count += count
82
83 def compute(self) -> Tensor:
84 """Compute the Perplexity."""
85 return _perplexity_compute(self.total_log_probs, self.count)
86
87 def plot(
88 self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
89 ) -> _PLOT_OUT_TYPE:
90 """Plot a single or multiple values from the metric.
91
92 Args:
93 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
94 If no value is provided, will automatically call `metric.compute` and plot that result.
95 ax: An matplotlib axis object. If provided will add plot to that axis
96
97 Returns:
98 Figure and Axes object
99
100 Raises:
101 ModuleNotFoundError:
102 If `matplotlib` is not installed
103
104 .. plot::
105 :scale: 75
106
107 >>> # Example plotting a single value
108 >>> import torch
109 >>> from torchmetrics.text import Perplexity
110 >>> metric = Perplexity()
111 >>> metric.update(torch.rand(2, 8, 5), torch.randint(5, (2, 8)))
112 >>> fig_, ax_ = metric.plot()
113
114 .. plot::
115 :scale: 75
116
117 >>> # Example plotting multiple values
118 >>> import torch
119 >>> from torchmetrics.text import Perplexity
120 >>> metric = Perplexity()
121 >>> values = [ ]
122 >>> for _ in range(10):
123 ... values.append(metric(torch.rand(2, 8, 5), torch.randint(5, (2, 8))))
124 >>> fig_, ax_ = metric.plot(values)
125 """
126 return self._plot(val, ax)
127
[end of src/torchmetrics/text/perplexity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/functional/text/perplexity.py b/src/torchmetrics/functional/text/perplexity.py
--- a/src/torchmetrics/functional/text/perplexity.py
+++ b/src/torchmetrics/functional/text/perplexity.py
@@ -26,7 +26,8 @@
Args:
preds:
- Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].
+ Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
+ vocab_size]. Scores will be normalized internally using softmax.
target:
Ground truth values with a shape [batch_size, seq_len].
@@ -70,7 +71,8 @@
Args:
preds:
- Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].
+ Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
+ vocab_size]. Scores will be normalized internally using softmax.
target:
Ground truth values with a shape [batch_size, seq_len].
ignore_index:
@@ -118,7 +120,8 @@
Args:
preds:
- Log probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].
+ Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
+ vocab_size], which is the output of a language model. Scores will be normalized internally using softmax.
target:
Ground truth values with a shape [batch_size, seq_len].
ignore_index:
diff --git a/src/torchmetrics/text/perplexity.py b/src/torchmetrics/text/perplexity.py
--- a/src/torchmetrics/text/perplexity.py
+++ b/src/torchmetrics/text/perplexity.py
@@ -32,8 +32,9 @@
As input to ``forward`` and ``update`` the metric accepts the following input:
- - ``preds`` (:class:`~torch.Tensor`): Probabilities assigned to each token in a sequence with shape
- [batch_size, seq_len, vocab_size]
+ - ``preds`` (:class:`~torch.Tensor`): Logits or a unnormalized score assigned to each token in a sequence with shape
+ [batch_size, seq_len, vocab_size], which is the output of a language model. Scores will be normalized internally
+ using softmax.
- ``target`` (:class:`~torch.Tensor`): Ground truth values with a shape [batch_size, seq_len]
As output of ``forward`` and ``compute`` the metric returns the following output:
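Usage stays the same after this documentation-only fix; a hedged sketch based on the module's own doctest, passing raw model scores and letting the metric normalize internally:
```python
import torch
from torchmetrics.text import Perplexity  # import path taken from the doctest in the listing above

preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22))    # logits, not log-probabilities
target = torch.randint(5, (2, 8), generator=torch.manual_seed(22))
target[0, 6:] = -100
perp = Perplexity(ignore_index=-100)
print(perp(preds, target))   # tensor(5.2545), as in the existing doctest
```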
| {"golden_diff": "diff --git a/src/torchmetrics/functional/text/perplexity.py b/src/torchmetrics/functional/text/perplexity.py\n--- a/src/torchmetrics/functional/text/perplexity.py\n+++ b/src/torchmetrics/functional/text/perplexity.py\n@@ -26,7 +26,8 @@\n \n Args:\n preds:\n- Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].\n+ Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n+ vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n \n@@ -70,7 +71,8 @@\n \n Args:\n preds:\n- Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].\n+ Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n+ vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n@@ -118,7 +120,8 @@\n \n Args:\n preds:\n- Log probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].\n+ Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n+ vocab_size], which is the output of a language model. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\ndiff --git a/src/torchmetrics/text/perplexity.py b/src/torchmetrics/text/perplexity.py\n--- a/src/torchmetrics/text/perplexity.py\n+++ b/src/torchmetrics/text/perplexity.py\n@@ -32,8 +32,9 @@\n \n As input to ``forward`` and ``update`` the metric accepts the following input:\n \n- - ``preds`` (:class:`~torch.Tensor`): Probabilities assigned to each token in a sequence with shape\n- [batch_size, seq_len, vocab_size]\n+ - ``preds`` (:class:`~torch.Tensor`): Logits or a unnormalized score assigned to each token in a sequence with shape\n+ [batch_size, seq_len, vocab_size], which is the output of a language model. Scores will be normalized internally\n+ using softmax.\n - ``target`` (:class:`~torch.Tensor`): Ground truth values with a shape [batch_size, seq_len]\n \n As output of ``forward`` and ``compute`` the metric returns the following output:\n", "issue": "mismatch for docs in `perplexity`\n## \ud83d\udcda Documentation\r\n\r\nI am working on the NLP models and find that the implementation of `perplexity` in https://github.com/Lightning-AI/torchmetrics/blob/e4df07109586d1f605e06aed85fbc6794e8ed833/src/torchmetrics/functional/text/perplexity.py#L114-L126 is probably wrong.\r\n\r\nSince `_perplexity_update` in https://github.com/Lightning-AI/torchmetrics/blob/e4df07109586d1f605e06aed85fbc6794e8ed833/src/torchmetrics/functional/text/perplexity.py#L84-L88 execute the `softmax`, it indicates that the `preds` **should be a logit or a unnormalized score** and definitely NOT a log probability.\r\n\r\nI wonder if this is intentioned move or is just a simple typo. \r\n\r\nI also find the similar fix in issue #1366, which made the mistake. 
Should I make a PR to this?\r\n\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import functional as F # noqa: N812\n\n_TORCH_FLOAT_OR_DOUBLE = (torch.float32, torch.float64)\n\n\ndef _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:\n \"\"\"Check shape and type consistency of input vectors.\n\n Args:\n preds:\n Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].\n target:\n Ground truth values with a shape [batch_size, seq_len].\n\n Raises:\n ValueError:\n If ``preds`` tensor has no 3 dimensions.\n ValueError:\n If ``target`` tensor has no 2 dimensions.\n ValueError:\n If the first two dimensions of ``preds`` and ``target`` do not equal.\n TypeError:\n If ``preds`` dtype is not one of ``(torch.float16, torch.float32, torch.float64)``\n TypeError:\n If ``target`` is not of a type LongTensor (torch.int64)\n \"\"\"\n if len(preds.shape) != 3:\n raise ValueError(\n \"Input tensor `preds` is expected to have 3 dimensions, [batch_size, seq_len, vocab_size],\"\n f\" but got {len(preds.shape)}.\"\n )\n if len(target.shape) != 2:\n raise ValueError(\n \"Input tensor `target` is expected to have 2 dimensions, [batch_size, seq_len],\"\n f\" but got {len(target.shape)}.\"\n )\n if preds.shape[:2] != target.shape:\n raise ValueError(\n \"Input tensors `preds` and `target` are expected to have equaling first two dimensions,\"\n f\" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}.\"\n )\n if preds.dtype not in _TORCH_FLOAT_OR_DOUBLE:\n raise TypeError(\n f\"Input tensor `preds` is expected to be of a type one of {_TORCH_FLOAT_OR_DOUBLE} but got {preds.dtype}.\"\n )\n if target.dtype != torch.int64:\n raise TypeError(f\"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.\")\n\n\ndef _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tuple[Tensor, Tensor]:\n \"\"\"Compute intermediate statistics for Perplexity.\n\n Args:\n preds:\n Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. 
If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Log probabilities, summed over all samples\n Number of samples\n \"\"\"\n _check_shape_and_type_consistency(preds, target)\n\n probs = F.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)\n target = target.reshape(-1)\n\n if ignore_index is not None:\n mask = target.ne(ignore_index)\n target = target.where(target != ignore_index, torch.tensor(0, device=target.device))\n else:\n mask = torch.ones_like(target, dtype=torch.bool)\n\n probs = probs[:, target].diagonal()[mask]\n total_log_probs = -probs.log().sum()\n count = mask.sum()\n\n return total_log_probs, count\n\n\ndef _perplexity_compute(total: Tensor, count: Tensor) -> Tensor:\n \"\"\"Compute the Perplexity.\n\n Args:\n total: Log probabilities, summed over all samples\n count: Number of samples\n Returns:\n Perplexity\n \"\"\"\n return torch.exp(total / count)\n\n\ndef perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:\n \"\"\"Perplexity measures how well a language model predicts a text sample.\n\n This metric is calculated as the average number of bits per word a model needs to represent the sample.\n\n Args:\n preds:\n Log probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Perplexity value\n\n Examples:\n >>> import torch\n >>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22))\n >>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22))\n >>> target[0, 6:] = -100\n >>> perplexity(preds, target, ignore_index=-100)\n tensor(5.2545)\n \"\"\"\n total, count = _perplexity_update(preds, target, ignore_index)\n return _perplexity_compute(total, count)\n", "path": "src/torchmetrics/functional/text/perplexity.py"}, {"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Dict, Optional, Sequence, Union\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.text.perplexity import _perplexity_compute, _perplexity_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"Perplexity.plot\"]\n\n\nclass Perplexity(Metric):\n r\"\"\"Perplexity measures how well a language model predicts a text sample.\n\n It's calculated as the average number of bits per word a model needs to represent the sample.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): Probabilities assigned to each token in a sequence with shape\n [batch_size, seq_len, vocab_size]\n - ``target`` (:class:`~torch.Tensor`): Ground truth values with a shape 
[batch_size, seq_len]\n\n As output of ``forward`` and ``compute`` the metric returns the following output:\n\n - ``perp`` (:class:`~torch.Tensor`): A tensor with the perplexity score\n\n Args:\n ignore_index: Integer specifying a target class to ignore.\n If given, this class index does not contribute to the returned score.\n kwargs:\n Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Examples:\n >>> from torchmetrics.text import Perplexity\n >>> import torch\n >>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22))\n >>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22))\n >>> target[0, 6:] = -100\n >>> perp = Perplexity(ignore_index=-100)\n >>> perp(preds, target)\n tensor(5.2545)\n \"\"\"\n is_differentiable = True\n higher_is_better = False\n full_state_update = False\n total_log_probs: Tensor\n count: Tensor\n\n def __init__(\n self,\n ignore_index: Optional[int] = None,\n **kwargs: Dict[str, Any],\n ) -> None:\n super().__init__(**kwargs)\n if ignore_index is not None and not isinstance(ignore_index, int):\n raise ValueError(f\"Argument `ignore_index` expected to either be `None` or an `int` but got {ignore_index}\")\n self.ignore_index = ignore_index\n self.add_state(\"total_log_probs\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"count\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n total_log_probs, count = _perplexity_update(preds, target, self.ignore_index)\n self.total_log_probs += total_log_probs\n self.count += count\n\n def compute(self) -> Tensor:\n \"\"\"Compute the Perplexity.\"\"\"\n return _perplexity_compute(self.total_log_probs, self.count)\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.text import Perplexity\n >>> metric = Perplexity()\n >>> metric.update(torch.rand(2, 8, 5), torch.randint(5, (2, 8)))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.text import Perplexity\n >>> metric = Perplexity()\n >>> values = [ ]\n >>> for _ in range(10):\n ... values.append(metric(torch.rand(2, 8, 5), torch.randint(5, (2, 8))))\n >>> fig_, ax_ = metric.plot(values)\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/text/perplexity.py"}]} | 3,838 | 592 |
gh_patches_debug_21007 | rasdani/github-patches | git_diff | joke2k__faker-213 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`.prefix`/`.suffix` returns a tuple instead of a single value
`.prefix` (and `.suffix`) can occasionally return a tuple of values instead of a single value when `prefixes_male` and `prefixes_female` (or `suffixes_*`) are present in the provider.
[See here for the code responsible.](https://github.com/joke2k/faker/blob/2af330e09d84306d10921fed00ad2e5cc8e3d36f/faker/providers/person/__init__.py#L93-L94)
I wasn't sure if this was intentional (it's documented to do so -- then again, the documentation is autogenerated, isn't it?), so I didn't make a PR yet, but it's certainly counterintuitive.
</issue>
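A small sketch of the mechanism, using `random.choice` as a stand-in for `BaseProvider.random_element` (an assumption for illustration): choosing from a tuple of two sequences yields a whole sequence, so a second choice is needed to get a single prefix.
```python
import random

prefixes_male = ('Mr.', 'Dr.')
prefixes_female = ('Mrs.', 'Ms.', 'Dr.')

picked = random.choice((prefixes_male, prefixes_female))
print(picked)                 # e.g. ('Mrs.', 'Ms.', 'Dr.'), an entire tuple rather than one value
print(random.choice(picked))  # choosing again yields a single prefix
```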
<code>
[start of faker/providers/person/__init__.py]
1 localized = True
2
3 from .. import BaseProvider
4
5
6 class Provider(BaseProvider):
7 formats = ['{{first_name}} {{last_name}}', ]
8
9 first_names = ['John', 'Jane']
10
11 last_names = ['Doe', ]
12
13 def name(self):
14 """
15 :example 'John Doe'
16 """
17 pattern = self.random_element(self.formats)
18 return self.generator.parse(pattern)
19
20 @classmethod
21 def first_name(cls):
22 return cls.random_element(cls.first_names)
23
24 @classmethod
25 def last_name(cls):
26 return cls.random_element(cls.last_names)
27
28 def name_male(self):
29 if hasattr(self, 'formats_male'):
30 formats = self.formats_male
31 else:
32 formats = self.formats
33 pattern = self.random_element(formats)
34 return self.generator.parse(pattern)
35
36 def name_female(self):
37 if hasattr(self, 'formats_female'):
38 formats = self.formats_female
39 else:
40 formats = self.formats
41 pattern = self.random_element(formats)
42 return self.generator.parse(pattern)
43
44 @classmethod
45 def first_name_male(cls):
46 if hasattr(cls, 'first_names_male'):
47 return cls.random_element(cls.first_names_male)
48 return cls.first_name()
49
50 @classmethod
51 def first_name_female(cls):
52 if hasattr(cls, 'first_names_female'):
53 return cls.random_element(cls.first_names_female)
54 return cls.first_name()
55
56 @classmethod
57 def last_name_male(cls):
58 if hasattr(cls, 'last_names_male'):
59 return cls.random_element(cls.last_names_male)
60 return cls.last_name()
61
62 @classmethod
63 def last_name_female(cls):
64 if hasattr(cls, 'last_names_female'):
65 return cls.random_element(cls.last_names_female)
66 return cls.last_name()
67
68
69 @classmethod
70 def prefix(cls):
71 if hasattr(cls, 'prefixes'):
72 return cls.random_element(cls.prefixes)
73 if hasattr(cls, 'prefixes_male') and hasattr(cls, 'prefixes_female'):
74 return cls.random_element((cls.prefixes_male, cls.prefixes_female))
75 return ''
76
77 @classmethod
78 def prefix_male(cls):
79 if hasattr(cls, 'prefixes_male'):
80 return cls.random_element(cls.prefixes_male)
81 return cls.prefix()
82
83 @classmethod
84 def prefix_female(cls):
85 if hasattr(cls, 'prefixes_female'):
86 return cls.random_element(cls.prefixes_female)
87 return cls.prefix()
88
89 @classmethod
90 def suffix(cls):
91 if hasattr(cls, 'suffixes'):
92 return cls.random_element(cls.suffixes)
93 if hasattr(cls, 'suffixes_male') and hasattr(cls, 'suffixes_female'):
94 return cls.random_element((cls.suffixes_male, cls.suffixes_female))
95 return ''
96
97 @classmethod
98 def suffix_male(cls):
99 if hasattr(cls, 'suffixes_male'):
100 return cls.random_element(cls.suffixes_male)
101 return cls.suffix()
102
103 @classmethod
104 def suffix_female(cls):
105 if hasattr(cls, 'suffixes_female'):
106 return cls.random_element(cls.suffixes_female)
107 return cls.suffix()
108
[end of faker/providers/person/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/person/__init__.py b/faker/providers/person/__init__.py
--- a/faker/providers/person/__init__.py
+++ b/faker/providers/person/__init__.py
@@ -71,7 +71,8 @@
if hasattr(cls, 'prefixes'):
return cls.random_element(cls.prefixes)
if hasattr(cls, 'prefixes_male') and hasattr(cls, 'prefixes_female'):
- return cls.random_element((cls.prefixes_male, cls.prefixes_female))
+ prefixes = cls.random_element((cls.prefixes_male, cls.prefixes_female))
+ return cls.random_element(prefixes)
return ''
@classmethod
@@ -91,7 +92,8 @@
if hasattr(cls, 'suffixes'):
return cls.random_element(cls.suffixes)
if hasattr(cls, 'suffixes_male') and hasattr(cls, 'suffixes_female'):
- return cls.random_element((cls.suffixes_male, cls.suffixes_female))
+ suffixes = cls.random_element((cls.suffixes_male, cls.suffixes_female))
+ return cls.random_element(suffixes)
return ''
@classmethod
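A hedged regression sketch, assuming the patched provider above: a provider that only defines gendered prefixes should now return a single string from `prefix()`.
```python
from faker.providers.person import Provider

class DummyProvider(Provider):
    prefixes_male = ('Mr.', 'Dr.')
    prefixes_female = ('Mrs.', 'Ms.')

value = DummyProvider.prefix()
assert isinstance(value, str)
assert value in {'Mr.', 'Dr.', 'Mrs.', 'Ms.'}
```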
| {"golden_diff": "diff --git a/faker/providers/person/__init__.py b/faker/providers/person/__init__.py\n--- a/faker/providers/person/__init__.py\n+++ b/faker/providers/person/__init__.py\n@@ -71,7 +71,8 @@\n if hasattr(cls, 'prefixes'):\n return cls.random_element(cls.prefixes)\n if hasattr(cls, 'prefixes_male') and hasattr(cls, 'prefixes_female'):\n- return cls.random_element((cls.prefixes_male, cls.prefixes_female))\n+ prefixes = cls.random_element((cls.prefixes_male, cls.prefixes_female))\n+ return cls.random_element(prefixes)\n return ''\n \n @classmethod\n@@ -91,7 +92,8 @@\n if hasattr(cls, 'suffixes'):\n return cls.random_element(cls.suffixes)\n if hasattr(cls, 'suffixes_male') and hasattr(cls, 'suffixes_female'):\n- return cls.random_element((cls.suffixes_male, cls.suffixes_female))\n+ suffixes = cls.random_element((cls.suffixes_male, cls.suffixes_female))\n+ return cls.random_element(suffixes)\n return ''\n \n @classmethod\n", "issue": "`.prefix`/`.suffix` returns a tuple instead of a single value\n`.prefix` (and `.suffix`) can occasionally return a tuple of values instead of a single value when `prefixes_male` and `prefixes_female` (or `suffixes_*`) are present in the provider.\n\n[See here for the code responsible.](https://github.com/joke2k/faker/blob/2af330e09d84306d10921fed00ad2e5cc8e3d36f/faker/providers/person/__init__.py#L93-L94)\n\nI wasn't sure if this was intentional (it's documented to do so -- then again, the documentation is autogenerated, isn't it?), so I didn't make a PR yet, but it's certainly counterintuitive.\n\n", "before_files": [{"content": "localized = True\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n formats = ['{{first_name}} {{last_name}}', ]\n\n first_names = ['John', 'Jane']\n\n last_names = ['Doe', ]\n\n def name(self):\n \"\"\"\n :example 'John Doe'\n \"\"\"\n pattern = self.random_element(self.formats)\n return self.generator.parse(pattern)\n\n @classmethod\n def first_name(cls):\n return cls.random_element(cls.first_names)\n\n @classmethod\n def last_name(cls):\n return cls.random_element(cls.last_names)\n\n def name_male(self):\n if hasattr(self, 'formats_male'):\n formats = self.formats_male\n else:\n formats = self.formats\n pattern = self.random_element(formats)\n return self.generator.parse(pattern)\n\n def name_female(self):\n if hasattr(self, 'formats_female'):\n formats = self.formats_female\n else:\n formats = self.formats\n pattern = self.random_element(formats)\n return self.generator.parse(pattern)\n\n @classmethod\n def first_name_male(cls):\n if hasattr(cls, 'first_names_male'):\n return cls.random_element(cls.first_names_male)\n return cls.first_name()\n\n @classmethod\n def first_name_female(cls):\n if hasattr(cls, 'first_names_female'):\n return cls.random_element(cls.first_names_female)\n return cls.first_name()\n\n @classmethod\n def last_name_male(cls):\n if hasattr(cls, 'last_names_male'):\n return cls.random_element(cls.last_names_male)\n return cls.last_name()\n\n @classmethod\n def last_name_female(cls):\n if hasattr(cls, 'last_names_female'):\n return cls.random_element(cls.last_names_female)\n return cls.last_name()\n\n\n @classmethod\n def prefix(cls):\n if hasattr(cls, 'prefixes'):\n return cls.random_element(cls.prefixes)\n if hasattr(cls, 'prefixes_male') and hasattr(cls, 'prefixes_female'):\n return cls.random_element((cls.prefixes_male, cls.prefixes_female))\n return ''\n\n @classmethod\n def prefix_male(cls):\n if hasattr(cls, 'prefixes_male'):\n return cls.random_element(cls.prefixes_male)\n 
return cls.prefix()\n\n @classmethod\n def prefix_female(cls):\n if hasattr(cls, 'prefixes_female'):\n return cls.random_element(cls.prefixes_female)\n return cls.prefix()\n\n @classmethod\n def suffix(cls):\n if hasattr(cls, 'suffixes'):\n return cls.random_element(cls.suffixes)\n if hasattr(cls, 'suffixes_male') and hasattr(cls, 'suffixes_female'):\n return cls.random_element((cls.suffixes_male, cls.suffixes_female))\n return ''\n\n @classmethod\n def suffix_male(cls):\n if hasattr(cls, 'suffixes_male'):\n return cls.random_element(cls.suffixes_male)\n return cls.suffix()\n\n @classmethod\n def suffix_female(cls):\n if hasattr(cls, 'suffixes_female'):\n return cls.random_element(cls.suffixes_female)\n return cls.suffix()\n", "path": "faker/providers/person/__init__.py"}]} | 1,596 | 252 |
gh_patches_debug_5043 | rasdani/github-patches | git_diff | dask__distributed-3652 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Variable delete raises KeyError
It seems that there is an unhandled KeyError in the scheduler variable delete logic.
When using the asynchronous API it is only visible in the scheduler logs, but it does mean that Variables will be "leaking" and staying around instead of being deleted.
It happens regardless of whether you set a value.
Shortest reproduction I can provide is as follows:
```python
from distributed import Client
from distributed import Variable
def main():
Client()
Variable().delete()
if __name__ == '__main__':
main()
```
```python
tornado.application - ERROR - Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <tornado.platform.asyncio.AsyncIOLoop object at 0x7f8fe9265be0>>, <Task finished coro=<VariableExtension.delete() done, defined at ./venv/lib/python3.6/site-packages/distributed/variable.py:101> exception=KeyError('variable-68a51209410248fa9f69f84b25f48343',)>)
Traceback (most recent call last):
File "./venv/lib/python3.6/site-packages/tornado/ioloop.py", line 743, in _run_callback
ret = callback()
File "./venv/lib/python3.6/site-packages/tornado/ioloop.py", line 767, in _discard_future_result
future.result()
File "./venv/lib/python3.6/site-packages/distributed/variable.py", line 110, in delete
del self.waiting_conditions[name]
KeyError: 'variable-68a51209410248fa9f69f84b25f48343'
```
Python 3.6.10
Reproduced with distributed 2.9.3 and 2.10.0
</issue>
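A minimal sketch of the failing pattern, independent of distributed: `waiting_conditions` is a `defaultdict`, but `del` never triggers the default factory, so deleting a name that was never set or waited on raises `KeyError` in both dictionaries.
```python
from collections import defaultdict
import asyncio

waiting_conditions = defaultdict(asyncio.Condition)
variables = {}

name = "variable-68a51209410248fa9f69f84b25f48343"
for mapping in (waiting_conditions, variables):
    try:
        del mapping[name]
    except KeyError as err:
        print("KeyError:", err)
```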
<code>
[start of distributed/variable.py]
1 import asyncio
2 from collections import defaultdict
3 import logging
4 import uuid
5
6 from tlz import merge
7
8 from .client import Future, _get_global_client, Client
9 from .utils import tokey, log_errors, TimeoutError, ignoring
10 from .worker import get_client
11
12 logger = logging.getLogger(__name__)
13
14
15 class VariableExtension:
16 """ An extension for the scheduler to manage queues
17
18 This adds the following routes to the scheduler
19
20 * variable-set
21 * variable-get
22 * variable-delete
23 """
24
25 def __init__(self, scheduler):
26 self.scheduler = scheduler
27 self.variables = dict()
28 self.waiting = defaultdict(set)
29 self.waiting_conditions = defaultdict(asyncio.Condition)
30 self.started = asyncio.Condition()
31
32 self.scheduler.handlers.update(
33 {"variable_set": self.set, "variable_get": self.get}
34 )
35
36 self.scheduler.stream_handlers["variable-future-release"] = self.future_release
37 self.scheduler.stream_handlers["variable_delete"] = self.delete
38
39 self.scheduler.extensions["variables"] = self
40
41 async def set(self, stream=None, name=None, key=None, data=None, client=None):
42 if key is not None:
43 record = {"type": "Future", "value": key}
44 self.scheduler.client_desires_keys(keys=[key], client="variable-%s" % name)
45 else:
46 record = {"type": "msgpack", "value": data}
47 try:
48 old = self.variables[name]
49 except KeyError:
50 pass
51 else:
52 if old["type"] == "Future" and old["value"] != key:
53 asyncio.ensure_future(self.release(old["value"], name))
54 if name not in self.variables:
55 async with self.started:
56 self.started.notify_all()
57 self.variables[name] = record
58
59 async def release(self, key, name):
60 while self.waiting[key, name]:
61 async with self.waiting_conditions[name]:
62 await self.waiting_conditions[name].wait()
63
64 self.scheduler.client_releases_keys(keys=[key], client="variable-%s" % name)
65 del self.waiting[key, name]
66
67 async def future_release(self, name=None, key=None, token=None, client=None):
68 self.waiting[key, name].remove(token)
69 if not self.waiting[key, name]:
70 async with self.waiting_conditions[name]:
71 self.waiting_conditions[name].notify_all()
72
73 async def get(self, stream=None, name=None, client=None, timeout=None):
74 start = self.scheduler.loop.time()
75 while name not in self.variables:
76 if timeout is not None:
77 left = timeout - (self.scheduler.loop.time() - start)
78 else:
79 left = None
80 if left and left < 0:
81 raise TimeoutError()
82 try:
83
84 async def _(): # Python 3.6 is odd and requires special help here
85 await self.started.acquire()
86 await self.started.wait()
87
88 await asyncio.wait_for(_(), timeout=left)
89 finally:
90 with ignoring(RuntimeError): # Python 3.6 loses lock on finally clause
91 self.started.release()
92
93 record = self.variables[name]
94 if record["type"] == "Future":
95 key = record["value"]
96 token = uuid.uuid4().hex
97 ts = self.scheduler.tasks.get(key)
98 state = ts.state if ts is not None else "lost"
99 msg = {"token": token, "state": state}
100 if state == "erred":
101 msg["exception"] = ts.exception_blame.exception
102 msg["traceback"] = ts.exception_blame.traceback
103 record = merge(record, msg)
104 self.waiting[key, name].add(token)
105 return record
106
107 async def delete(self, stream=None, name=None, client=None):
108 with log_errors():
109 try:
110 old = self.variables[name]
111 except KeyError:
112 pass
113 else:
114 if old["type"] == "Future":
115 await self.release(old["value"], name)
116 del self.waiting_conditions[name]
117 del self.variables[name]
118
119
120 class Variable:
121 """ Distributed Global Variable
122
123 This allows multiple clients to share futures and data between each other
124 with a single mutable variable. All metadata is sequentialized through the
125 scheduler. Race conditions can occur.
126
127 Values must be either Futures or msgpack-encodable data (ints, lists,
128 strings, etc..) All data will be kept and sent through the scheduler, so
129 it is wise not to send too much. If you want to share a large amount of
130 data then ``scatter`` it and share the future instead.
131
132 .. warning::
133
134 This object is experimental and has known issues in Python 2
135
136 Parameters
137 ----------
138 name: string (optional)
139 Name used by other clients and the scheduler to identify the variable.
140 If not given, a random name will be generated.
141 client: Client (optional)
142 Client used for communication with the scheduler. Defaults to the
143 value of ``_get_global_client()``.
144
145 Examples
146 --------
147 >>> from dask.distributed import Client, Variable # doctest: +SKIP
148 >>> client = Client() # doctest: +SKIP
149 >>> x = Variable('x') # doctest: +SKIP
150 >>> x.set(123) # docttest: +SKIP
151 >>> x.get() # docttest: +SKIP
152 123
153 >>> future = client.submit(f, x) # doctest: +SKIP
154 >>> x.set(future) # doctest: +SKIP
155
156 See Also
157 --------
158 Queue: shared multi-producer/multi-consumer queue between clients
159 """
160
161 def __init__(self, name=None, client=None, maxsize=0):
162 self.client = client or _get_global_client()
163 self.name = name or "variable-" + uuid.uuid4().hex
164
165 async def _set(self, value):
166 if isinstance(value, Future):
167 await self.client.scheduler.variable_set(
168 key=tokey(value.key), name=self.name
169 )
170 else:
171 await self.client.scheduler.variable_set(data=value, name=self.name)
172
173 def set(self, value, **kwargs):
174 """ Set the value of this variable
175
176 Parameters
177 ----------
178 value: Future or object
179 Must be either a Future or a msgpack-encodable value
180 """
181 return self.client.sync(self._set, value, **kwargs)
182
183 async def _get(self, timeout=None):
184 d = await self.client.scheduler.variable_get(
185 timeout=timeout, name=self.name, client=self.client.id
186 )
187 if d["type"] == "Future":
188 value = Future(d["value"], self.client, inform=True, state=d["state"])
189 if d["state"] == "erred":
190 value._state.set_error(d["exception"], d["traceback"])
191 self.client._send_to_scheduler(
192 {
193 "op": "variable-future-release",
194 "name": self.name,
195 "key": d["value"],
196 "token": d["token"],
197 }
198 )
199 else:
200 value = d["value"]
201 return value
202
203 def get(self, timeout=None, **kwargs):
204 """ Get the value of this variable """
205 return self.client.sync(self._get, timeout=timeout, **kwargs)
206
207 def delete(self):
208 """ Delete this variable
209
210 Caution, this affects all clients currently pointing to this variable.
211 """
212 if self.client.status == "running": # TODO: can leave zombie futures
213 self.client._send_to_scheduler({"op": "variable_delete", "name": self.name})
214
215 def __getstate__(self):
216 return (self.name, self.client.scheduler.address)
217
218 def __setstate__(self, state):
219 name, address = state
220 try:
221 client = get_client(address)
222 assert client.scheduler.address == address
223 except (AttributeError, AssertionError):
224 client = Client(address, set_as_default=False)
225 self.__init__(name=name, client=client)
226
[end of distributed/variable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/variable.py b/distributed/variable.py
--- a/distributed/variable.py
+++ b/distributed/variable.py
@@ -113,8 +113,10 @@
else:
if old["type"] == "Future":
await self.release(old["value"], name)
- del self.waiting_conditions[name]
- del self.variables[name]
+ with ignoring(KeyError):
+ del self.waiting_conditions[name]
+ with ignoring(KeyError):
+ del self.variables[name]
class Variable:
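Assuming `ignoring` suppresses the named exception the same way `contextlib.suppress` does, the fixed shape can be reproduced with the standard library alone:
```python
from contextlib import suppress

def safe_delete(mapping, key):
    # same effect as `with ignoring(KeyError): del mapping[key]`
    with suppress(KeyError):
        del mapping[key]

store = {}
safe_delete(store, "variable-123")  # no KeyError even though the key is absent
```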
| {"golden_diff": "diff --git a/distributed/variable.py b/distributed/variable.py\n--- a/distributed/variable.py\n+++ b/distributed/variable.py\n@@ -113,8 +113,10 @@\n else:\n if old[\"type\"] == \"Future\":\n await self.release(old[\"value\"], name)\n- del self.waiting_conditions[name]\n- del self.variables[name]\n+ with ignoring(KeyError):\n+ del self.waiting_conditions[name]\n+ with ignoring(KeyError):\n+ del self.variables[name]\n \n \n class Variable:\n", "issue": "Variable delete raises KeyError\nIt seems that there is an unhandled KeyError in the scheduler variable delete logic.\r\n\r\nIf using the asynchronous API only visible in the scheduler logs but it does mean that Variables will be \"leaking\" and staying around instead of being deleted.\r\n\r\nIt happens regardless of whether you set a value.\r\n\r\nShortest reproduction I can provide is as follows:\r\n```python\r\nfrom distributed import Client\r\nfrom distributed import Variable\r\n\r\n\r\ndef main():\r\n Client()\r\n Variable().delete()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\n```python\r\ntornado.application - ERROR - Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <tornado.platform.asyncio.AsyncIOLoop object at 0x7f8fe9265be0>>, <Task finished coro=<VariableExtension.delete() done, defined at ./venv/lib/python3.6/site-packages/distributed/variable.py:101> exception=KeyError('variable-68a51209410248fa9f69f84b25f48343',)>)\r\nTraceback (most recent call last):\r\n File \"./venv/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\r\n ret = callback()\r\n File \"./venv/lib/python3.6/site-packages/tornado/ioloop.py\", line 767, in _discard_future_result\r\n future.result()\r\n File \"./venv/lib/python3.6/site-packages/distributed/variable.py\", line 110, in delete\r\n del self.waiting_conditions[name]\r\nKeyError: 'variable-68a51209410248fa9f69f84b25f48343'\r\n```\r\n\r\nPython 3.6.10\r\nReproduced with distributed 2.9.3 and 2.10.0\n", "before_files": [{"content": "import asyncio\nfrom collections import defaultdict\nimport logging\nimport uuid\n\nfrom tlz import merge\n\nfrom .client import Future, _get_global_client, Client\nfrom .utils import tokey, log_errors, TimeoutError, ignoring\nfrom .worker import get_client\n\nlogger = logging.getLogger(__name__)\n\n\nclass VariableExtension:\n \"\"\" An extension for the scheduler to manage queues\n\n This adds the following routes to the scheduler\n\n * variable-set\n * variable-get\n * variable-delete\n \"\"\"\n\n def __init__(self, scheduler):\n self.scheduler = scheduler\n self.variables = dict()\n self.waiting = defaultdict(set)\n self.waiting_conditions = defaultdict(asyncio.Condition)\n self.started = asyncio.Condition()\n\n self.scheduler.handlers.update(\n {\"variable_set\": self.set, \"variable_get\": self.get}\n )\n\n self.scheduler.stream_handlers[\"variable-future-release\"] = self.future_release\n self.scheduler.stream_handlers[\"variable_delete\"] = self.delete\n\n self.scheduler.extensions[\"variables\"] = self\n\n async def set(self, stream=None, name=None, key=None, data=None, client=None):\n if key is not None:\n record = {\"type\": \"Future\", \"value\": key}\n self.scheduler.client_desires_keys(keys=[key], client=\"variable-%s\" % name)\n else:\n record = {\"type\": \"msgpack\", \"value\": data}\n try:\n old = self.variables[name]\n except KeyError:\n pass\n else:\n if old[\"type\"] == \"Future\" and old[\"value\"] != key:\n asyncio.ensure_future(self.release(old[\"value\"], 
name))\n if name not in self.variables:\n async with self.started:\n self.started.notify_all()\n self.variables[name] = record\n\n async def release(self, key, name):\n while self.waiting[key, name]:\n async with self.waiting_conditions[name]:\n await self.waiting_conditions[name].wait()\n\n self.scheduler.client_releases_keys(keys=[key], client=\"variable-%s\" % name)\n del self.waiting[key, name]\n\n async def future_release(self, name=None, key=None, token=None, client=None):\n self.waiting[key, name].remove(token)\n if not self.waiting[key, name]:\n async with self.waiting_conditions[name]:\n self.waiting_conditions[name].notify_all()\n\n async def get(self, stream=None, name=None, client=None, timeout=None):\n start = self.scheduler.loop.time()\n while name not in self.variables:\n if timeout is not None:\n left = timeout - (self.scheduler.loop.time() - start)\n else:\n left = None\n if left and left < 0:\n raise TimeoutError()\n try:\n\n async def _(): # Python 3.6 is odd and requires special help here\n await self.started.acquire()\n await self.started.wait()\n\n await asyncio.wait_for(_(), timeout=left)\n finally:\n with ignoring(RuntimeError): # Python 3.6 loses lock on finally clause\n self.started.release()\n\n record = self.variables[name]\n if record[\"type\"] == \"Future\":\n key = record[\"value\"]\n token = uuid.uuid4().hex\n ts = self.scheduler.tasks.get(key)\n state = ts.state if ts is not None else \"lost\"\n msg = {\"token\": token, \"state\": state}\n if state == \"erred\":\n msg[\"exception\"] = ts.exception_blame.exception\n msg[\"traceback\"] = ts.exception_blame.traceback\n record = merge(record, msg)\n self.waiting[key, name].add(token)\n return record\n\n async def delete(self, stream=None, name=None, client=None):\n with log_errors():\n try:\n old = self.variables[name]\n except KeyError:\n pass\n else:\n if old[\"type\"] == \"Future\":\n await self.release(old[\"value\"], name)\n del self.waiting_conditions[name]\n del self.variables[name]\n\n\nclass Variable:\n \"\"\" Distributed Global Variable\n\n This allows multiple clients to share futures and data between each other\n with a single mutable variable. All metadata is sequentialized through the\n scheduler. Race conditions can occur.\n\n Values must be either Futures or msgpack-encodable data (ints, lists,\n strings, etc..) All data will be kept and sent through the scheduler, so\n it is wise not to send too much. If you want to share a large amount of\n data then ``scatter`` it and share the future instead.\n\n .. warning::\n\n This object is experimental and has known issues in Python 2\n\n Parameters\n ----------\n name: string (optional)\n Name used by other clients and the scheduler to identify the variable.\n If not given, a random name will be generated.\n client: Client (optional)\n Client used for communication with the scheduler. 
Defaults to the\n value of ``_get_global_client()``.\n\n Examples\n --------\n >>> from dask.distributed import Client, Variable # doctest: +SKIP\n >>> client = Client() # doctest: +SKIP\n >>> x = Variable('x') # doctest: +SKIP\n >>> x.set(123) # docttest: +SKIP\n >>> x.get() # docttest: +SKIP\n 123\n >>> future = client.submit(f, x) # doctest: +SKIP\n >>> x.set(future) # doctest: +SKIP\n\n See Also\n --------\n Queue: shared multi-producer/multi-consumer queue between clients\n \"\"\"\n\n def __init__(self, name=None, client=None, maxsize=0):\n self.client = client or _get_global_client()\n self.name = name or \"variable-\" + uuid.uuid4().hex\n\n async def _set(self, value):\n if isinstance(value, Future):\n await self.client.scheduler.variable_set(\n key=tokey(value.key), name=self.name\n )\n else:\n await self.client.scheduler.variable_set(data=value, name=self.name)\n\n def set(self, value, **kwargs):\n \"\"\" Set the value of this variable\n\n Parameters\n ----------\n value: Future or object\n Must be either a Future or a msgpack-encodable value\n \"\"\"\n return self.client.sync(self._set, value, **kwargs)\n\n async def _get(self, timeout=None):\n d = await self.client.scheduler.variable_get(\n timeout=timeout, name=self.name, client=self.client.id\n )\n if d[\"type\"] == \"Future\":\n value = Future(d[\"value\"], self.client, inform=True, state=d[\"state\"])\n if d[\"state\"] == \"erred\":\n value._state.set_error(d[\"exception\"], d[\"traceback\"])\n self.client._send_to_scheduler(\n {\n \"op\": \"variable-future-release\",\n \"name\": self.name,\n \"key\": d[\"value\"],\n \"token\": d[\"token\"],\n }\n )\n else:\n value = d[\"value\"]\n return value\n\n def get(self, timeout=None, **kwargs):\n \"\"\" Get the value of this variable \"\"\"\n return self.client.sync(self._get, timeout=timeout, **kwargs)\n\n def delete(self):\n \"\"\" Delete this variable\n\n Caution, this affects all clients currently pointing to this variable.\n \"\"\"\n if self.client.status == \"running\": # TODO: can leave zombie futures\n self.client._send_to_scheduler({\"op\": \"variable_delete\", \"name\": self.name})\n\n def __getstate__(self):\n return (self.name, self.client.scheduler.address)\n\n def __setstate__(self, state):\n name, address = state\n try:\n client = get_client(address)\n assert client.scheduler.address == address\n except (AttributeError, AssertionError):\n client = Client(address, set_as_default=False)\n self.__init__(name=name, client=client)\n", "path": "distributed/variable.py"}]} | 3,263 | 120 |
gh_patches_debug_23955 | rasdani/github-patches | git_diff | arviz-devs__arviz-615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Return value of plot_pair is the same array numvars times
The return value of `plot_pair` is a list that contains the array of subplots `numvars` times. I guess it comes from the original version with gridspec.
Line 168 defines `axs` as an empty list, which is no longer used; line 219 appends `ax` to `axs`, but `ax` already contains all the subplots. Eventually, `axs` is returned instead of `ax`.
In addition, maybe the docstring should be updated to specify that `ax` (both as input and as return value) is actually an array of axes, not a single axes object. It cannot be a single axes object because there are many subplots, but I am not sure everyone will see that right away.
</issue>
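A standalone sketch of the pattern described above (hypothetical names, not arviz code): `plt.subplots(n, n)` already returns the full array of axes, so appending it to a list on every iteration and returning the list yields `numvars` references to the same array.
```python
import matplotlib.pyplot as plt

numvars = 3
fig, ax = plt.subplots(numvars - 1, numvars - 1)  # ax is already a 2x2 array of Axes

axs = []
for _ in range(numvars):
    axs.append(ax)            # appends the same whole array each time

print(type(ax).__name__, ax.shape)          # ndarray (2, 2)
print(len(axs), all(a is ax for a in axs))  # 3 True
```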
<code>
[start of arviz/plots/pairplot.py]
1 """Plot a scatter or hexbin of sampled parameters."""
2 import numpy as np
3 import matplotlib.pyplot as plt
4 from matplotlib.ticker import NullFormatter
5 from mpl_toolkits.axes_grid1 import make_axes_locatable
6
7 from ..data import convert_to_dataset
8 from .kdeplot import plot_kde
9 from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
10 from ..utils import _var_names
11
12
13 def plot_pair(
14 data,
15 var_names=None,
16 coords=None,
17 figsize=None,
18 textsize=None,
19 kind="scatter",
20 gridsize="auto",
21 contour=True,
22 fill_last=True,
23 divergences=False,
24 colorbar=False,
25 ax=None,
26 divergences_kwargs=None,
27 plot_kwargs=None,
28 ):
29 """
30 Plot a scatter or hexbin matrix of the sampled parameters.
31
32 Parameters
33 ----------
34 data : obj
35 Any object that can be converted to an az.InferenceData object
36 Refer to documentation of az.convert_to_dataset for details
37 var_names : list of variable names
38 Variables to be plotted, if None all variable are plotted
39 coords : mapping, optional
40 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
41 figsize : figure size tuple
42 If None, size is (8 + numvars, 8 + numvars)
43 textsize: int
44 Text size for labels. If None it will be autoscaled based on figsize.
45 kind : str
46 Type of plot to display (kde or hexbin)
47 gridsize : int or (int, int), optional
48 Only works for kind=hexbin.
49 The number of hexagons in the x-direction. The corresponding number of hexagons in the
50 y-direction is chosen such that the hexagons are approximately regular.
51 Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
52 in the x-direction and the y-direction.
53 contour : bool
54 If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
55 fill_last : bool
56 If True fill the last contour of the 2D KDE plot. Defaults to True.
57 divergences : Boolean
58 If True divergences will be plotted in a different color
59 colorbar : bool
60 If True a colorbar will be included as part of the plot (Defaults to False).
61 Only works when kind=hexbin
62 ax: axes
63 Matplotlib axes
64 divergences_kwargs : dicts, optional
65 Additional keywords passed to ax.scatter for divergences
66 plot_kwargs : dicts, optional
67 Additional keywords passed to ax.plot, az.plot_kde or ax.hexbin
68 Returns
69 -------
70 ax : matplotlib axes
71
72 Examples
73 --------
74 KDE Pair Plot
75
76 .. plot::
77 :context: close-figs
78
79 >>> import arviz as az
80 >>> centered = az.load_arviz_data('centered_eight')
81 >>> coords = {'school': ['Choate', 'Deerfield']}
82 >>> az.plot_pair(centered,
83 >>> var_names=['theta', 'mu', 'tau'],
84 >>> kind='kde',
85 >>> coords=coords,
86 >>> divergences=True,
87 >>> textsize=18)
88
89 Hexbin pair plot
90
91 .. plot::
92 :context: close-figs
93
94 >>> az.plot_pair(centered,
95 >>> var_names=['theta', 'mu'],
96 >>> coords=coords,
97 >>> textsize=18,
98 >>> kind='hexbin')
99
100 Pair plot showing divergences
101
102 .. plot::
103 :context: close-figs
104
105 >>> az.plot_pair(centered,
106 ... var_names=['theta', 'mu', 'tau'],
107 ... coords=coords,
108 ... divergences=True,
109 ... textsize=18)
110 """
111 valid_kinds = ["scatter", "kde", "hexbin"]
112 if kind not in valid_kinds:
113 raise ValueError(
114 ("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
115 )
116
117 if coords is None:
118 coords = {}
119
120 if plot_kwargs is None:
121 plot_kwargs = {}
122
123 if kind == "scatter":
124 plot_kwargs.setdefault("marker", ".")
125 plot_kwargs.setdefault("lw", 0)
126
127 if divergences_kwargs is None:
128 divergences_kwargs = {}
129
130 divergences_kwargs.setdefault("marker", "o")
131 divergences_kwargs.setdefault("markeredgecolor", "k")
132 divergences_kwargs.setdefault("color", "C1")
133 divergences_kwargs.setdefault("lw", 0)
134
135 # Get posterior draws and combine chains
136 posterior_data = convert_to_dataset(data, group="posterior")
137 var_names = _var_names(var_names, posterior_data)
138 flat_var_names, _posterior = xarray_to_ndarray(
139 get_coords(posterior_data, coords), var_names=var_names, combined=True
140 )
141
142 # Get diverging draws and combine chains
143 if divergences:
144 divergent_data = convert_to_dataset(data, group="sample_stats")
145 _, diverging_mask = xarray_to_ndarray(
146 divergent_data, var_names=("diverging",), combined=True
147 )
148 diverging_mask = np.squeeze(diverging_mask)
149
150 if gridsize == "auto":
151 gridsize = int(len(_posterior[0]) ** 0.35)
152
153 numvars = len(flat_var_names)
154
155 if numvars < 2:
156 raise Exception("Number of variables to be plotted must be 2 or greater.")
157
158 if numvars == 2:
159 (figsize, ax_labelsize, _, xt_labelsize, _, _) = _scale_fig_size(
160 figsize, textsize, numvars - 1, numvars - 1
161 )
162
163 if ax is None:
164 fig, ax = plt.subplots(figsize=figsize, constrained_layout=True)
165
166 if kind == "scatter":
167 ax.plot(_posterior[0], _posterior[1], **plot_kwargs)
168 elif kind == "kde":
169 plot_kde(
170 _posterior[0],
171 _posterior[1],
172 contour=contour,
173 fill_last=fill_last,
174 ax=ax,
175 **plot_kwargs
176 )
177 else:
178 hexbin = ax.hexbin(
179 _posterior[0], _posterior[1], mincnt=1, gridsize=gridsize, **plot_kwargs
180 )
181 ax.grid(False)
182
183 if kind == "hexbin" and colorbar:
184 cbar = ax.figure.colorbar(hexbin, ticks=[hexbin.norm.vmin, hexbin.norm.vmax], ax=ax)
185 cbar.ax.set_yticklabels(["low", "high"], fontsize=ax_labelsize)
186
187 if divergences:
188 ax.plot(
189 _posterior[0][diverging_mask], _posterior[1][diverging_mask], **divergences_kwargs
190 )
191
192 ax.set_xlabel("{}".format(flat_var_names[0]), fontsize=ax_labelsize, wrap=True)
193 ax.set_ylabel("{}".format(flat_var_names[1]), fontsize=ax_labelsize, wrap=True)
194 ax.tick_params(labelsize=xt_labelsize)
195 axs = ax
196
197 else:
198 (figsize, ax_labelsize, _, xt_labelsize, _, _) = _scale_fig_size(
199 figsize, textsize, numvars - 2, numvars - 2
200 )
201
202 if ax is None:
203 fig, ax = plt.subplots(
204 numvars - 1, numvars - 1, figsize=figsize, constrained_layout=True
205 )
206 axs = []
207 hexbin_values = []
208 for i in range(0, numvars - 1):
209 var1 = _posterior[i]
210
211 for j in range(0, numvars - 1):
212 if j < i:
213 ax[j, i].axis("off")
214 continue
215
216 var2 = _posterior[j + 1]
217
218 if kind == "scatter":
219 ax[j, i].plot(var1, var2, **plot_kwargs)
220
221 elif kind == "kde":
222 plot_kde(
223 var1, var2, contour=contour, fill_last=fill_last, ax=ax[j, i], **plot_kwargs
224 )
225
226 else:
227 ax[j, i].grid(False)
228 hexbin = ax[j, i].hexbin(var1, var2, mincnt=1, gridsize=gridsize, **plot_kwargs)
229 if kind == "hexbin" and colorbar:
230 hexbin_values.append(hexbin.norm.vmin)
231 hexbin_values.append(hexbin.norm.vmax)
232 if j == i == 0 and colorbar:
233 divider = make_axes_locatable(ax[0, 1])
234 cax = divider.append_axes("left", size="7%")
235 cbar = fig.colorbar(
236 hexbin, ticks=[hexbin.norm.vmin, hexbin.norm.vmax], cax=cax
237 )
238 cbar.ax.set_yticklabels(["low", "high"], fontsize=ax_labelsize)
239
240 if divergences:
241 ax[j, i].plot(var1[diverging_mask], var2[diverging_mask], **divergences_kwargs)
242
243 if j + 1 != numvars - 1:
244 ax[j, i].axes.get_xaxis().set_major_formatter(NullFormatter())
245 else:
246 ax[j, i].set_xlabel(
247 "{}".format(flat_var_names[i]), fontsize=ax_labelsize, wrap=True
248 )
249 if i != 0:
250 ax[j, i].axes.get_yaxis().set_major_formatter(NullFormatter())
251 else:
252 ax[j, i].set_ylabel(
253 "{}".format(flat_var_names[j + 1]), fontsize=ax_labelsize, wrap=True
254 )
255
256 ax[j, i].tick_params(labelsize=xt_labelsize)
257 axs.append(ax)
258
259 return axs
260
[end of arviz/plots/pairplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py
--- a/arviz/plots/pairplot.py
+++ b/arviz/plots/pairplot.py
@@ -192,7 +192,6 @@
ax.set_xlabel("{}".format(flat_var_names[0]), fontsize=ax_labelsize, wrap=True)
ax.set_ylabel("{}".format(flat_var_names[1]), fontsize=ax_labelsize, wrap=True)
ax.tick_params(labelsize=xt_labelsize)
- axs = ax
else:
(figsize, ax_labelsize, _, xt_labelsize, _, _) = _scale_fig_size(
@@ -203,7 +202,6 @@
fig, ax = plt.subplots(
numvars - 1, numvars - 1, figsize=figsize, constrained_layout=True
)
- axs = []
hexbin_values = []
for i in range(0, numvars - 1):
var1 = _posterior[i]
@@ -254,6 +252,5 @@
)
ax[j, i].tick_params(labelsize=xt_labelsize)
- axs.append(ax)
- return axs
+ return ax
| {"golden_diff": "diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py\n--- a/arviz/plots/pairplot.py\n+++ b/arviz/plots/pairplot.py\n@@ -192,7 +192,6 @@\n ax.set_xlabel(\"{}\".format(flat_var_names[0]), fontsize=ax_labelsize, wrap=True)\n ax.set_ylabel(\"{}\".format(flat_var_names[1]), fontsize=ax_labelsize, wrap=True)\n ax.tick_params(labelsize=xt_labelsize)\n- axs = ax\n \n else:\n (figsize, ax_labelsize, _, xt_labelsize, _, _) = _scale_fig_size(\n@@ -203,7 +202,6 @@\n fig, ax = plt.subplots(\n numvars - 1, numvars - 1, figsize=figsize, constrained_layout=True\n )\n- axs = []\n hexbin_values = []\n for i in range(0, numvars - 1):\n var1 = _posterior[i]\n@@ -254,6 +252,5 @@\n )\n \n ax[j, i].tick_params(labelsize=xt_labelsize)\n- axs.append(ax)\n \n- return axs\n+ return ax\n", "issue": "Return value of plot_pair is numvars the same\nThe return value of `plot_pair` is a list that contains `numvars` times the array of subplots. I guess it comes from the original version with gridspec. \r\n\r\nLine 168 defines `axs` empty list, which is no longer used, line 219 appends `ax` to `axs`, but `ax` already contains all the subplots. Eventually, `axs` is returned instead of `ax`.\r\n\r\nIn addition, maybe the docstring should be updated to specify that `ax` (both as input and as return value) is actually an array of axes, not an axes object. It cannot be a single axes because there are many, but I am not sure if everyone will see it right away.\r\n\n", "before_files": [{"content": "\"\"\"Plot a scatter or hexbin of sampled parameters.\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import NullFormatter\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom ..data import convert_to_dataset\nfrom .kdeplot import plot_kde\nfrom .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords\nfrom ..utils import _var_names\n\n\ndef plot_pair(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n kind=\"scatter\",\n gridsize=\"auto\",\n contour=True,\n fill_last=True,\n divergences=False,\n colorbar=False,\n ax=None,\n divergences_kwargs=None,\n plot_kwargs=None,\n):\n \"\"\"\n Plot a scatter or hexbin matrix of the sampled parameters.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : list of variable names\n Variables to be plotted, if None all variable are plotted\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : figure size tuple\n If None, size is (8 + numvars, 8 + numvars)\n textsize: int\n Text size for labels. If None it will be autoscaled based on figsize.\n kind : str\n Type of plot to display (kde or hexbin)\n gridsize : int or (int, int), optional\n Only works for kind=hexbin.\n The number of hexagons in the x-direction. The corresponding number of hexagons in the\n y-direction is chosen such that the hexagons are approximately regular.\n Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons\n in the x-direction and the y-direction.\n contour : bool\n If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.\n fill_last : bool\n If True fill the last contour of the 2D KDE plot. 
Defaults to True.\n divergences : Boolean\n If True divergences will be plotted in a different color\n colorbar : bool\n If True a colorbar will be included as part of the plot (Defaults to False).\n Only works when kind=hexbin\n ax: axes\n Matplotlib axes\n divergences_kwargs : dicts, optional\n Additional keywords passed to ax.scatter for divergences\n plot_kwargs : dicts, optional\n Additional keywords passed to ax.plot, az.plot_kde or ax.hexbin\n Returns\n -------\n ax : matplotlib axes\n\n Examples\n --------\n KDE Pair Plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> centered = az.load_arviz_data('centered_eight')\n >>> coords = {'school': ['Choate', 'Deerfield']}\n >>> az.plot_pair(centered,\n >>> var_names=['theta', 'mu', 'tau'],\n >>> kind='kde',\n >>> coords=coords,\n >>> divergences=True,\n >>> textsize=18)\n\n Hexbin pair plot\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_pair(centered,\n >>> var_names=['theta', 'mu'],\n >>> coords=coords,\n >>> textsize=18,\n >>> kind='hexbin')\n\n Pair plot showing divergences\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_pair(centered,\n ... var_names=['theta', 'mu', 'tau'],\n ... coords=coords,\n ... divergences=True,\n ... textsize=18)\n \"\"\"\n valid_kinds = [\"scatter\", \"kde\", \"hexbin\"]\n if kind not in valid_kinds:\n raise ValueError(\n (\"Plot type {} not recognized.\" \"Plot type must be in {}\").format(kind, valid_kinds)\n )\n\n if coords is None:\n coords = {}\n\n if plot_kwargs is None:\n plot_kwargs = {}\n\n if kind == \"scatter\":\n plot_kwargs.setdefault(\"marker\", \".\")\n plot_kwargs.setdefault(\"lw\", 0)\n\n if divergences_kwargs is None:\n divergences_kwargs = {}\n\n divergences_kwargs.setdefault(\"marker\", \"o\")\n divergences_kwargs.setdefault(\"markeredgecolor\", \"k\")\n divergences_kwargs.setdefault(\"color\", \"C1\")\n divergences_kwargs.setdefault(\"lw\", 0)\n\n # Get posterior draws and combine chains\n posterior_data = convert_to_dataset(data, group=\"posterior\")\n var_names = _var_names(var_names, posterior_data)\n flat_var_names, _posterior = xarray_to_ndarray(\n get_coords(posterior_data, coords), var_names=var_names, combined=True\n )\n\n # Get diverging draws and combine chains\n if divergences:\n divergent_data = convert_to_dataset(data, group=\"sample_stats\")\n _, diverging_mask = xarray_to_ndarray(\n divergent_data, var_names=(\"diverging\",), combined=True\n )\n diverging_mask = np.squeeze(diverging_mask)\n\n if gridsize == \"auto\":\n gridsize = int(len(_posterior[0]) ** 0.35)\n\n numvars = len(flat_var_names)\n\n if numvars < 2:\n raise Exception(\"Number of variables to be plotted must be 2 or greater.\")\n\n if numvars == 2:\n (figsize, ax_labelsize, _, xt_labelsize, _, _) = _scale_fig_size(\n figsize, textsize, numvars - 1, numvars - 1\n )\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n\n if kind == \"scatter\":\n ax.plot(_posterior[0], _posterior[1], **plot_kwargs)\n elif kind == \"kde\":\n plot_kde(\n _posterior[0],\n _posterior[1],\n contour=contour,\n fill_last=fill_last,\n ax=ax,\n **plot_kwargs\n )\n else:\n hexbin = ax.hexbin(\n _posterior[0], _posterior[1], mincnt=1, gridsize=gridsize, **plot_kwargs\n )\n ax.grid(False)\n\n if kind == \"hexbin\" and colorbar:\n cbar = ax.figure.colorbar(hexbin, ticks=[hexbin.norm.vmin, hexbin.norm.vmax], ax=ax)\n cbar.ax.set_yticklabels([\"low\", \"high\"], fontsize=ax_labelsize)\n\n if divergences:\n ax.plot(\n _posterior[0][diverging_mask], _posterior[1][diverging_mask], 
**divergences_kwargs\n )\n\n ax.set_xlabel(\"{}\".format(flat_var_names[0]), fontsize=ax_labelsize, wrap=True)\n ax.set_ylabel(\"{}\".format(flat_var_names[1]), fontsize=ax_labelsize, wrap=True)\n ax.tick_params(labelsize=xt_labelsize)\n axs = ax\n\n else:\n (figsize, ax_labelsize, _, xt_labelsize, _, _) = _scale_fig_size(\n figsize, textsize, numvars - 2, numvars - 2\n )\n\n if ax is None:\n fig, ax = plt.subplots(\n numvars - 1, numvars - 1, figsize=figsize, constrained_layout=True\n )\n axs = []\n hexbin_values = []\n for i in range(0, numvars - 1):\n var1 = _posterior[i]\n\n for j in range(0, numvars - 1):\n if j < i:\n ax[j, i].axis(\"off\")\n continue\n\n var2 = _posterior[j + 1]\n\n if kind == \"scatter\":\n ax[j, i].plot(var1, var2, **plot_kwargs)\n\n elif kind == \"kde\":\n plot_kde(\n var1, var2, contour=contour, fill_last=fill_last, ax=ax[j, i], **plot_kwargs\n )\n\n else:\n ax[j, i].grid(False)\n hexbin = ax[j, i].hexbin(var1, var2, mincnt=1, gridsize=gridsize, **plot_kwargs)\n if kind == \"hexbin\" and colorbar:\n hexbin_values.append(hexbin.norm.vmin)\n hexbin_values.append(hexbin.norm.vmax)\n if j == i == 0 and colorbar:\n divider = make_axes_locatable(ax[0, 1])\n cax = divider.append_axes(\"left\", size=\"7%\")\n cbar = fig.colorbar(\n hexbin, ticks=[hexbin.norm.vmin, hexbin.norm.vmax], cax=cax\n )\n cbar.ax.set_yticklabels([\"low\", \"high\"], fontsize=ax_labelsize)\n\n if divergences:\n ax[j, i].plot(var1[diverging_mask], var2[diverging_mask], **divergences_kwargs)\n\n if j + 1 != numvars - 1:\n ax[j, i].axes.get_xaxis().set_major_formatter(NullFormatter())\n else:\n ax[j, i].set_xlabel(\n \"{}\".format(flat_var_names[i]), fontsize=ax_labelsize, wrap=True\n )\n if i != 0:\n ax[j, i].axes.get_yaxis().set_major_formatter(NullFormatter())\n else:\n ax[j, i].set_ylabel(\n \"{}\".format(flat_var_names[j + 1]), fontsize=ax_labelsize, wrap=True\n )\n\n ax[j, i].tick_params(labelsize=xt_labelsize)\n axs.append(ax)\n\n return axs\n", "path": "arviz/plots/pairplot.py"}]} | 3,564 | 273 |
gh_patches_debug_44407 | rasdani/github-patches | git_diff | google__mobly-227 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`snippet_shell` is broken for python3
```
$ snippet_shell.py --mbs
s.logINFO:root:[AndroidDevice|HT67K0300103] Launching snippet apk com.google.android.mobly.snippet.bundled with protocol v1
VTraceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.6/bin/snippet_shell.py", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/angli/Developer/mobly/tools/snippet_shell.py", line 84, in <module>
SnippetShell(package).main(args.serial)
File "/Users/angli/Developer/mobly/mobly/controllers/android_device_lib/jsonrpc_shell_base.py", line 88, in main
self.start_console()
File "/Users/angli/Developer/mobly/mobly/controllers/android_device_lib/jsonrpc_shell_base.py", line 73, in start_console
self._start_services(console_env)
File "/Users/angli/Developer/mobly/tools/snippet_shell.py", line 43, in _start_services
self._ad.load_snippet(name='snippet', package=self._package)
File "/Users/angli/Developer/mobly/mobly/controllers/android_device.py", line 646, in load_snippet
client.start_app_and_connect()
File "/Users/angli/Developer/mobly/mobly/controllers/android_device_lib/snippet_client.py", line 112, in start_app_and_connect
line)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/re.py", line 172, in match
return _compile(pattern, flags).match(string)
TypeError: cannot use a string pattern on a bytes-like object
```
</issue>
<code>
[start of mobly/controllers/android_device_lib/snippet_client.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """JSON RPC interface to Mobly Snippet Lib."""
15 import logging
16 import re
17 import time
18
19 from mobly import utils
20 from mobly.controllers.android_device_lib import adb
21 from mobly.controllers.android_device_lib import jsonrpc_client_base
22
23 _INSTRUMENTATION_RUNNER_PACKAGE = (
24 'com.google.android.mobly.snippet.SnippetRunner')
25
26 # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
27 _LAUNCH_CMD_V0 = ('am instrument -w -e action start -e port %s %s/' +
28 _INSTRUMENTATION_RUNNER_PACKAGE)
29
30 _LAUNCH_CMD_V1 = (
31 'am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
32
33 _STOP_CMD = (
34 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
35
36 # Maximum time to wait for a v0 snippet to start on the device (10 minutes).
37 # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
38 _APP_START_WAIT_TIME_V0 = 10 * 60
39
40
41 class Error(Exception):
42 pass
43
44
45 class ProtocolVersionError(Error):
46 """Raised when the protocol reported by the snippet is unknown."""
47
48
49 class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
50 """A client for interacting with snippet APKs using Mobly Snippet Lib.
51
52 See superclass documentation for a list of public attributes.
53
54 It currently supports both v0 and v1 snippet launch protocols, although
55 support for v0 will be removed in a future version.
56
57 For a description of the launch protocols, see the documentation in
58 mobly-snippet-lib, SnippetRunner.java.
59 """
60
61 def __init__(self, package, adb_proxy, log=logging.getLogger()):
62 """Initializes a SnippetClient.
63
64 Args:
65 package: (str) The package name of the apk where the snippets are
66 defined.
67 adb_proxy: (adb.AdbProxy) Adb proxy for running adb commands.
68 log: (logging.Logger) logger to which to send log messages.
69 """
70 super(SnippetClient, self).__init__(app_name=package, log=log)
71 self.package = package
72 self._adb = adb_proxy
73 self._proc = None
74
75 def start_app_and_connect(self):
76 """Overrides superclass. Launches a snippet app and connects to it."""
77 self._check_app_installed()
78
79 # Try launching the app with the v1 protocol. If that fails, fall back
80 # to v0 for compatibility. Use info here so people know exactly what's
81 # happening here, which is helpful since they need to create their own
82 # instrumentations and manifest.
83 self.log.info('Launching snippet apk %s with protocol v1',
84 self.package)
85 cmd = _LAUNCH_CMD_V1 % self.package
86 start_time = time.time()
87 self._proc = self._do_start_app(cmd)
88
89 # "Instrumentation crashed" could be due to several reasons, eg
90 # exception thrown during startup or just a launch protocol 0 snippet
91 # dying because it needs the port flag. Sadly we have no way to tell so
92 # just warn and retry as v0.
93 # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is
94 # removed.
95 line = self._read_line()
96 if line in ('INSTRUMENTATION_RESULT: shortMsg=Process crashed.',
97 'INSTRUMENTATION_RESULT: shortMsg='
98 'java.lang.IllegalArgumentException'):
99 self.log.warning('Snippet %s crashed on startup. This might be an '
100 'actual error or a snippet using deprecated v0 '
101 'start protocol. Retrying as a v0 snippet.',
102 self.package)
103 self.host_port = utils.get_available_host_port()
104 # Reuse the host port as the device port in v0 snippet. This isn't
105 # safe in general, but the protocol is deprecated.
106 cmd = _LAUNCH_CMD_V0 % (self.host_port, self.package)
107 self._proc = self._do_start_app(cmd)
108 self._connect_to_v0()
109 else:
110 # Check protocol version and get the device port
111 match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$',
112 line)
113 if not match or match.group(1) != '1':
114 raise ProtocolVersionError(line)
115 self._connect_to_v1()
116 self.log.debug('Snippet %s started after %.1fs on host port %s',
117 self.package, time.time() - start_time, self.host_port)
118
119 def stop_app(self):
120 # Kill the pending 'adb shell am instrument -w' process if there is one.
121 # Although killing the snippet apk would abort this process anyway, we
122 # want to call stop_standing_subprocess() to perform a health check,
123 # print the failure stack trace if there was any, and reap it from the
124 # process table.
125 self.log.debug('Stopping snippet apk %s', self.package)
126 try:
127 # Close the socket connection.
128 self.disconnect()
129 if self._proc:
130 utils.stop_standing_subprocess(self._proc)
131 out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')
132 if 'OK (0 tests)' not in out:
133 raise Error('Failed to stop existing apk. Unexpected '
134 'output: %s' % out)
135 finally:
136 # Always clean up the adb port
137 if self.host_port:
138 self._adb.forward(['--remove', 'tcp:%d' % self.host_port])
139
140 def _start_event_client(self):
141 """Overrides superclass."""
142 event_client = SnippetClient(
143 package=self.package, adb_proxy=self._adb, log=self.log)
144 event_client.host_port = self.host_port
145 event_client.connect(self.uid,
146 jsonrpc_client_base.JsonRpcCommand.CONTINUE)
147 return event_client
148
149 def _check_app_installed(self):
150 # Check that the Mobly Snippet app is installed.
151 out = self._adb.shell('pm list package')
152 if not utils.grep('^package:%s$' % self.package, out):
153 raise jsonrpc_client_base.AppStartError(
154 '%s is not installed on %s' % (self.package, self._adb.serial))
155 # Check that the app is instrumented.
156 out = self._adb.shell('pm list instrumentation')
157 matched_out = utils.grep('^instrumentation:%s/%s' %
158 (self.package,
159 _INSTRUMENTATION_RUNNER_PACKAGE), out)
160 if not matched_out:
161 raise jsonrpc_client_base.AppStartError(
162 '%s is installed on %s, but it is not instrumented.' %
163 (self.package, self._adb.serial))
164 match = re.search('^instrumentation:(.*)\/(.*) \(target=(.*)\)$',
165 matched_out[0])
166 target_name = match.group(3)
167 # Check that the instrumentation target is installed if it's not the
168 # same as the snippet package.
169 if target_name != self.package:
170 out = self._adb.shell('pm list package')
171 if not utils.grep('^package:%s$' % target_name, out):
172 raise jsonrpc_client_base.AppStartError(
173 'Instrumentation target %s is not installed on %s' %
174 (target_name, self._adb.serial))
175
176 def _do_start_app(self, launch_cmd):
177 adb_cmd = [adb.ADB]
178 if self._adb.serial:
179 adb_cmd += ['-s', self._adb.serial]
180 adb_cmd += ['shell', launch_cmd]
181 return utils.start_standing_subprocess(adb_cmd, shell=False)
182
183 # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is
184 # removed.
185 def _connect_to_v0(self):
186 self.device_port = self.host_port
187 self._adb.forward(
188 ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
189 start_time = time.time()
190 expiration_time = start_time + _APP_START_WAIT_TIME_V0
191 while time.time() < expiration_time:
192 self.log.debug('Attempting to start %s.', self.package)
193 try:
194 self.connect()
195 return
196 except:
197 self.log.debug(
198 'v0 snippet %s is not yet running, retrying',
199 self.package,
200 exc_info=True)
201 time.sleep(1)
202 raise jsonrpc_client_base.AppStartError(
203 '%s failed to start on %s.' % (self.package, self._adb.serial))
204
205 def _connect_to_v1(self):
206 line = self._read_line()
207 match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)
208 if not match:
209 raise ProtocolVersionError(line)
210 self.device_port = int(match.group(1))
211
212 # Forward the device port to a new host port, and connect to that port
213 self.host_port = utils.get_available_host_port()
214 self._adb.forward(
215 ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
216 self.connect()
217
218 def _read_line(self):
219 line = self._proc.stdout.readline().rstrip()
220 self.log.debug('Read line from instrumentation output: "%s"', line)
221 return line
222
[end of mobly/controllers/android_device_lib/snippet_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -1,11 +1,11 @@
# Copyright 2016 Google Inc.
-#
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -42,7 +42,7 @@
pass
-class ProtocolVersionError(Error):
+class ProtocolVersionError(jsonrpc_client_base.AppStartError):
"""Raised when the protocol reported by the snippet is unknown."""
@@ -92,7 +92,7 @@
# just warn and retry as v0.
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is
# removed.
- line = self._read_line()
+ line = self._read_protocol_line()
if line in ('INSTRUMENTATION_RESULT: shortMsg=Process crashed.',
'INSTRUMENTATION_RESULT: shortMsg='
'java.lang.IllegalArgumentException'):
@@ -185,7 +185,8 @@
def _connect_to_v0(self):
self.device_port = self.host_port
self._adb.forward(
- ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
+ ['tcp:%d' % self.host_port,
+ 'tcp:%d' % self.device_port])
start_time = time.time()
expiration_time = start_time + _APP_START_WAIT_TIME_V0
while time.time() < expiration_time:
@@ -203,19 +204,46 @@
'%s failed to start on %s.' % (self.package, self._adb.serial))
def _connect_to_v1(self):
- line = self._read_line()
+ line = self._read_protocol_line()
match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)
if not match:
- raise ProtocolVersionError(line)
+ raise jsonrpc_client_base.AppStartError(line)
self.device_port = int(match.group(1))
# Forward the device port to a new host port, and connect to that port
self.host_port = utils.get_available_host_port()
self._adb.forward(
- ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
+ ['tcp:%d' % self.host_port,
+ 'tcp:%d' % self.device_port])
self.connect()
- def _read_line(self):
- line = self._proc.stdout.readline().rstrip()
- self.log.debug('Read line from instrumentation output: "%s"', line)
- return line
+ def _read_protocol_line(self):
+ """Reads the next line of instrumentation output relevant to snippets.
+
+ This method will skip over lines that don't start with 'SNIPPET' or
+ 'INSTRUMENTATION_RESULT'.
+
+ Returns:
+ (str) Next line of snippet-related instrumentation output, stripped.
+
+ Raises:
+ jsonrpc_client_base.AppStartError: If EOF is reached without any
+ protocol lines being read.
+ """
+ while True:
+ line = self._proc.stdout.readline().decode('utf-8')
+ if not line:
+ raise jsonrpc_client_base.AppStartError(
+ 'Unexpected EOF waiting for app to start')
+ # readline() uses an empty string to mark EOF, and a single newline
+ # to mark regular empty lines in the output. Don't move the strip()
+ # call above the truthiness check, or this method will start
+ # considering any blank output line to be EOF.
+ line = line.strip()
+ if (line.startswith('INSTRUMENTATION_RESULT:') or
+ line.startswith('SNIPPET ')):
+ self.log.debug(
+ 'Accepted line from instrumentation output: "%s"', line)
+ return line
+ self.log.debug('Discarded line from instrumentation output: "%s"',
+ line)
| {"golden_diff": "diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py\n--- a/mobly/controllers/android_device_lib/snippet_client.py\n+++ b/mobly/controllers/android_device_lib/snippet_client.py\n@@ -1,11 +1,11 @@\n # Copyright 2016 Google Inc.\n-# \n+#\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n # You may obtain a copy of the License at\n-# \n+#\n # http://www.apache.org/licenses/LICENSE-2.0\n-# \n+#\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n@@ -42,7 +42,7 @@\n pass\n \n \n-class ProtocolVersionError(Error):\n+class ProtocolVersionError(jsonrpc_client_base.AppStartError):\n \"\"\"Raised when the protocol reported by the snippet is unknown.\"\"\"\n \n \n@@ -92,7 +92,7 @@\n # just warn and retry as v0.\n # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is\n # removed.\n- line = self._read_line()\n+ line = self._read_protocol_line()\n if line in ('INSTRUMENTATION_RESULT: shortMsg=Process crashed.',\n 'INSTRUMENTATION_RESULT: shortMsg='\n 'java.lang.IllegalArgumentException'):\n@@ -185,7 +185,8 @@\n def _connect_to_v0(self):\n self.device_port = self.host_port\n self._adb.forward(\n- ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])\n+ ['tcp:%d' % self.host_port,\n+ 'tcp:%d' % self.device_port])\n start_time = time.time()\n expiration_time = start_time + _APP_START_WAIT_TIME_V0\n while time.time() < expiration_time:\n@@ -203,19 +204,46 @@\n '%s failed to start on %s.' % (self.package, self._adb.serial))\n \n def _connect_to_v1(self):\n- line = self._read_line()\n+ line = self._read_protocol_line()\n match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)\n if not match:\n- raise ProtocolVersionError(line)\n+ raise jsonrpc_client_base.AppStartError(line)\n self.device_port = int(match.group(1))\n \n # Forward the device port to a new host port, and connect to that port\n self.host_port = utils.get_available_host_port()\n self._adb.forward(\n- ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])\n+ ['tcp:%d' % self.host_port,\n+ 'tcp:%d' % self.device_port])\n self.connect()\n \n- def _read_line(self):\n- line = self._proc.stdout.readline().rstrip()\n- self.log.debug('Read line from instrumentation output: \"%s\"', line)\n- return line\n+ def _read_protocol_line(self):\n+ \"\"\"Reads the next line of instrumentation output relevant to snippets.\n+\n+ This method will skip over lines that don't start with 'SNIPPET' or\n+ 'INSTRUMENTATION_RESULT'.\n+\n+ Returns:\n+ (str) Next line of snippet-related instrumentation output, stripped.\n+\n+ Raises:\n+ jsonrpc_client_base.AppStartError: If EOF is reached without any\n+ protocol lines being read.\n+ \"\"\"\n+ while True:\n+ line = self._proc.stdout.readline().decode('utf-8')\n+ if not line:\n+ raise jsonrpc_client_base.AppStartError(\n+ 'Unexpected EOF waiting for app to start')\n+ # readline() uses an empty string to mark EOF, and a single newline\n+ # to mark regular empty lines in the output. 
Don't move the strip()\n+ # call above the truthiness check, or this method will start\n+ # considering any blank output line to be EOF.\n+ line = line.strip()\n+ if (line.startswith('INSTRUMENTATION_RESULT:') or\n+ line.startswith('SNIPPET ')):\n+ self.log.debug(\n+ 'Accepted line from instrumentation output: \"%s\"', line)\n+ return line\n+ self.log.debug('Discarded line from instrumentation output: \"%s\"',\n+ line)\n", "issue": "`snippet_shell` is broken for python3\n```\r\n$ snippet_shell.py --mbs\r\ns.logINFO:root:[AndroidDevice|HT67K0300103] Launching snippet apk com.google.android.mobly.snippet.bundled with protocol v1\r\nVTraceback (most recent call last):\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/bin/snippet_shell.py\", line 6, in <module>\r\n exec(compile(open(__file__).read(), __file__, 'exec'))\r\n File \"/Users/angli/Developer/mobly/tools/snippet_shell.py\", line 84, in <module>\r\n SnippetShell(package).main(args.serial)\r\n File \"/Users/angli/Developer/mobly/mobly/controllers/android_device_lib/jsonrpc_shell_base.py\", line 88, in main\r\n self.start_console()\r\n File \"/Users/angli/Developer/mobly/mobly/controllers/android_device_lib/jsonrpc_shell_base.py\", line 73, in start_console\r\n self._start_services(console_env)\r\n File \"/Users/angli/Developer/mobly/tools/snippet_shell.py\", line 43, in _start_services\r\n self._ad.load_snippet(name='snippet', package=self._package)\r\n File \"/Users/angli/Developer/mobly/mobly/controllers/android_device.py\", line 646, in load_snippet\r\n client.start_app_and_connect()\r\n File \"/Users/angli/Developer/mobly/mobly/controllers/android_device_lib/snippet_client.py\", line 112, in start_app_and_connect\r\n line)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/re.py\", line 172, in match\r\n return _compile(pattern, flags).match(string)\r\nTypeError: cannot use a string pattern on a bytes-like object\r\n```\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"JSON RPC interface to Mobly Snippet Lib.\"\"\"\nimport logging\nimport re\nimport time\n\nfrom mobly import utils\nfrom mobly.controllers.android_device_lib import adb\nfrom mobly.controllers.android_device_lib import jsonrpc_client_base\n\n_INSTRUMENTATION_RUNNER_PACKAGE = (\n 'com.google.android.mobly.snippet.SnippetRunner')\n\n# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.\n_LAUNCH_CMD_V0 = ('am instrument -w -e action start -e port %s %s/' +\n _INSTRUMENTATION_RUNNER_PACKAGE)\n\n_LAUNCH_CMD_V1 = (\n 'am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n\n_STOP_CMD = (\n 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n\n# Maximum time to wait for a v0 snippet to start on the device (10 minutes).\n# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.\n_APP_START_WAIT_TIME_V0 = 10 * 60\n\n\nclass Error(Exception):\n pass\n\n\nclass 
ProtocolVersionError(Error):\n \"\"\"Raised when the protocol reported by the snippet is unknown.\"\"\"\n\n\nclass SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n \"\"\"A client for interacting with snippet APKs using Mobly Snippet Lib.\n\n See superclass documentation for a list of public attributes.\n\n It currently supports both v0 and v1 snippet launch protocols, although\n support for v0 will be removed in a future version.\n\n For a description of the launch protocols, see the documentation in\n mobly-snippet-lib, SnippetRunner.java.\n \"\"\"\n\n def __init__(self, package, adb_proxy, log=logging.getLogger()):\n \"\"\"Initializes a SnippetClient.\n \n Args:\n package: (str) The package name of the apk where the snippets are\n defined.\n adb_proxy: (adb.AdbProxy) Adb proxy for running adb commands.\n log: (logging.Logger) logger to which to send log messages.\n \"\"\"\n super(SnippetClient, self).__init__(app_name=package, log=log)\n self.package = package\n self._adb = adb_proxy\n self._proc = None\n\n def start_app_and_connect(self):\n \"\"\"Overrides superclass. Launches a snippet app and connects to it.\"\"\"\n self._check_app_installed()\n\n # Try launching the app with the v1 protocol. If that fails, fall back\n # to v0 for compatibility. Use info here so people know exactly what's\n # happening here, which is helpful since they need to create their own\n # instrumentations and manifest.\n self.log.info('Launching snippet apk %s with protocol v1',\n self.package)\n cmd = _LAUNCH_CMD_V1 % self.package\n start_time = time.time()\n self._proc = self._do_start_app(cmd)\n\n # \"Instrumentation crashed\" could be due to several reasons, eg\n # exception thrown during startup or just a launch protocol 0 snippet\n # dying because it needs the port flag. Sadly we have no way to tell so\n # just warn and retry as v0.\n # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is\n # removed.\n line = self._read_line()\n if line in ('INSTRUMENTATION_RESULT: shortMsg=Process crashed.',\n 'INSTRUMENTATION_RESULT: shortMsg='\n 'java.lang.IllegalArgumentException'):\n self.log.warning('Snippet %s crashed on startup. This might be an '\n 'actual error or a snippet using deprecated v0 '\n 'start protocol. Retrying as a v0 snippet.',\n self.package)\n self.host_port = utils.get_available_host_port()\n # Reuse the host port as the device port in v0 snippet. 
This isn't\n # safe in general, but the protocol is deprecated.\n cmd = _LAUNCH_CMD_V0 % (self.host_port, self.package)\n self._proc = self._do_start_app(cmd)\n self._connect_to_v0()\n else:\n # Check protocol version and get the device port\n match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$',\n line)\n if not match or match.group(1) != '1':\n raise ProtocolVersionError(line)\n self._connect_to_v1()\n self.log.debug('Snippet %s started after %.1fs on host port %s',\n self.package, time.time() - start_time, self.host_port)\n\n def stop_app(self):\n # Kill the pending 'adb shell am instrument -w' process if there is one.\n # Although killing the snippet apk would abort this process anyway, we\n # want to call stop_standing_subprocess() to perform a health check,\n # print the failure stack trace if there was any, and reap it from the\n # process table.\n self.log.debug('Stopping snippet apk %s', self.package)\n try:\n # Close the socket connection.\n self.disconnect()\n if self._proc:\n utils.stop_standing_subprocess(self._proc)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n raise Error('Failed to stop existing apk. Unexpected '\n 'output: %s' % out)\n finally:\n # Always clean up the adb port\n if self.host_port:\n self._adb.forward(['--remove', 'tcp:%d' % self.host_port])\n\n def _start_event_client(self):\n \"\"\"Overrides superclass.\"\"\"\n event_client = SnippetClient(\n package=self.package, adb_proxy=self._adb, log=self.log)\n event_client.host_port = self.host_port\n event_client.connect(self.uid,\n jsonrpc_client_base.JsonRpcCommand.CONTINUE)\n return event_client\n\n def _check_app_installed(self):\n # Check that the Mobly Snippet app is installed.\n out = self._adb.shell('pm list package')\n if not utils.grep('^package:%s$' % self.package, out):\n raise jsonrpc_client_base.AppStartError(\n '%s is not installed on %s' % (self.package, self._adb.serial))\n # Check that the app is instrumented.\n out = self._adb.shell('pm list instrumentation')\n matched_out = utils.grep('^instrumentation:%s/%s' %\n (self.package,\n _INSTRUMENTATION_RUNNER_PACKAGE), out)\n if not matched_out:\n raise jsonrpc_client_base.AppStartError(\n '%s is installed on %s, but it is not instrumented.' 
%\n (self.package, self._adb.serial))\n match = re.search('^instrumentation:(.*)\\/(.*) \\(target=(.*)\\)$',\n matched_out[0])\n target_name = match.group(3)\n # Check that the instrumentation target is installed if it's not the\n # same as the snippet package.\n if target_name != self.package:\n out = self._adb.shell('pm list package')\n if not utils.grep('^package:%s$' % target_name, out):\n raise jsonrpc_client_base.AppStartError(\n 'Instrumentation target %s is not installed on %s' %\n (target_name, self._adb.serial))\n\n def _do_start_app(self, launch_cmd):\n adb_cmd = [adb.ADB]\n if self._adb.serial:\n adb_cmd += ['-s', self._adb.serial]\n adb_cmd += ['shell', launch_cmd]\n return utils.start_standing_subprocess(adb_cmd, shell=False)\n\n # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is\n # removed.\n def _connect_to_v0(self):\n self.device_port = self.host_port\n self._adb.forward(\n ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])\n start_time = time.time()\n expiration_time = start_time + _APP_START_WAIT_TIME_V0\n while time.time() < expiration_time:\n self.log.debug('Attempting to start %s.', self.package)\n try:\n self.connect()\n return\n except:\n self.log.debug(\n 'v0 snippet %s is not yet running, retrying',\n self.package,\n exc_info=True)\n time.sleep(1)\n raise jsonrpc_client_base.AppStartError(\n '%s failed to start on %s.' % (self.package, self._adb.serial))\n\n def _connect_to_v1(self):\n line = self._read_line()\n match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)\n if not match:\n raise ProtocolVersionError(line)\n self.device_port = int(match.group(1))\n\n # Forward the device port to a new host port, and connect to that port\n self.host_port = utils.get_available_host_port()\n self._adb.forward(\n ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])\n self.connect()\n\n def _read_line(self):\n line = self._proc.stdout.readline().rstrip()\n self.log.debug('Read line from instrumentation output: \"%s\"', line)\n return line\n", "path": "mobly/controllers/android_device_lib/snippet_client.py"}]} | 3,697 | 1,011 |
gh_patches_debug_13248 | rasdani/github-patches | git_diff | graspologic-org__graspologic-965 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix to repeated compilation of numba
- [x] Does this PR have a descriptive title that could go in our release notes?
- [ ] Does this PR add any new dependencies?
- [ ] Does this PR modify any existing APIs?
- [ ] Is the change to the API backwards compatible?
- [ ] Have you built the documentation (reference and/or tutorial) and verified the generated documentation is appropriate?
Fixes #946
Utilizes the addition of a global function at the top of the file that calls the nb.jit function a single time instead of in the constructor. This speeds up compilation and improves the running speed of the code.
</issue>
<code>
[start of graspologic/models/edge_swaps.py]
1 from typing import Optional
2
3 import numba as nb
4 import numpy as np
5 from beartype import beartype
6 from scipy.sparse import csr_matrix, lil_matrix
7 from sklearn.utils import check_scalar
8
9 from graspologic.preconditions import check_argument
10 from graspologic.types import AdjacencyMatrix, Tuple
11 from graspologic.utils import import_graph, is_loopless, is_symmetric, is_unweighted
12
13
14 # Code based on: https://github.com/joelnish/double-edge-swap-mcmc/blob/master/dbl_edge_mcmc.py
15 class EdgeSwapper:
16 """
17 Degree Preserving Edge Swaps
18
19 This class allows for performing degree preserving edge swaps to
20 generate new networks with the same degree sequence as the input network.
21
22 Attributes
23 ----------
24 adjacency : np.ndarray OR csr_matrix, shape (n_verts, n_verts)
25 The initial adjacency matrix to perform edge swaps on. Must be unweighted and undirected.
26
27 edge_list : np.ndarray, shape (n_verts, 2)
28 The corresponding edgelist for the input network
29
30 seed: int, optional
31 Random seed to make outputs reproducible, must be positive
32
33
34 References
35 ----------
36 .. [1] Fosdick, B. K., Larremore, D. B., Nishimura, J., & Ugander, J. (2018).
37 Configuring random graph models with fixed degree sequences.
38 Siam Review, 60(2), 315-355.
39
40 .. [2] Carstens, C. J., & Horadam, K. J. (2017).
41 Switching edges to randomize networks: what goes wrong and how to fix it.
42 Journal of Complex Networks, 5(3), 337-351.
43
44 .. [3] https://github.com/joelnish/double-edge-swap-mcmc/blob/master/dbl_edge_mcmc.py
45 """
46
47 @beartype
48 def __init__(self, adjacency: AdjacencyMatrix, seed: Optional[int] = None):
49
50 weight_check = is_unweighted(adjacency)
51 check_argument(weight_check, "adjacency must be unweighted")
52
53 loop_check = is_loopless(adjacency)
54 check_argument(loop_check, "adjacency cannot have loops")
55
56 direct_check = is_symmetric(adjacency)
57 check_argument(direct_check, "adjacency must be undirected")
58
59 max_seed = np.iinfo(np.uint32).max
60 if seed is None:
61 seed = np.random.randint(max_seed, dtype=np.int64)
62 seed = check_scalar(
63 seed, "seed", (int, np.integer), min_val=0, max_val=max_seed
64 )
65 self._rng = np.random.default_rng(seed)
66
67 adjacency = import_graph(adjacency, copy=True)
68
69 if isinstance(adjacency, csr_matrix):
70 # more efficient for manipulations which change sparsity structure
71 adjacency = lil_matrix(adjacency)
72 self._edge_swap_function = _edge_swap
73 else:
74 # for numpy input, use numba for JIT compilation
75 # NOTE: not convinced numba is helping much here, look into optimizing
76 self._edge_swap_function = nb.jit(_edge_swap)
77
78 self.adjacency = adjacency
79
80 edge_list = self._do_setup()
81 check_argument(len(edge_list) >= 2, "there must be at least 2 edges")
82 self.edge_list = edge_list
83
84 def _do_setup(self) -> np.ndarray:
85 """
86 Computes the edge_list from the adjancency matrix
87
88 Returns
89 -------
90 edge_list : np.ndarray, shape (n_verts, 2)
91 The corresponding edge_list of adjacency
92 """
93
94 # get edges for upper triangle of undirected graph
95 row_inds, col_inds = np.nonzero(self.adjacency)
96 upper = row_inds < col_inds
97 row_inds = row_inds[upper]
98 col_inds = col_inds[upper]
99 edge_list = np.stack((row_inds, col_inds)).T
100 return edge_list
101
102 def swap_edges(self, n_swaps: int = 1) -> Tuple[AdjacencyMatrix, np.ndarray]:
103 """
104 Performs a number of edge swaps on the graph
105
106 Parameters
107 ----------
108 n_swaps : int (default 1), optional
109 The number of edge swaps to be performed
110
111 Returns
112 -------
113 adjacency : np.ndarray OR csr.matrix, shape (n_verts, n_verts)
114 The adjancency matrix after a number of edge swaps are performed on the graph
115
116 edge_list : np.ndarray (n_verts, 2)
117 The edge_list after a number of edge swaps are perfomed on the graph
118 """
119
120 # Note: for some reason could not get reproducibility w/o setting seed
121 # inside of the _edge_swap_function itself
122 max_seed = np.iinfo(np.int32).max
123 for _ in range(n_swaps):
124 self.adjacency, self.edge_list = self._edge_swap_function(
125 self.adjacency,
126 self.edge_list,
127 seed=self._rng.integers(max_seed),
128 )
129
130 adjacency = self.adjacency
131 if isinstance(adjacency, lil_matrix):
132 adjacency = csr_matrix(adjacency)
133 else:
134 adjacency = adjacency.copy()
135
136 return adjacency, self.edge_list.copy()
137
138
139 def _edge_swap(
140 adjacency: AdjacencyMatrix, edge_list: np.ndarray, seed: Optional[int] = None
141 ) -> Tuple[AdjacencyMatrix, np.ndarray]:
142 """
143 Performs the edge swap on the adjacency matrix. If adjacency is
144 np.ndarray, then nopython=True is used in numba, but if adjacency
145 is csr_matrix, then forceobj=True is used in numba
146
147 Parameters
148 ----------
149 adjacency : np.ndarray OR csr_matrix, shape (n_verts, n_verts)
150 The initial adjacency matrix in which edge swaps are performed on it
151
152 edge_list : np.ndarray, shape (n_verts, 2)
153 The corresponding edge_list of adjacency
154
155 seed: int, optional
156 Random seed to make outputs reproducible, must be positive
157
158 Returns
159 -------
160 adjacency : np.ndarray OR csr_matrix, shape (n_verts, n_verts)
161 The adjancency matrix after an edge swap is performed on the graph
162
163 edge_list : np.ndarray (n_verts, 2)
164 The edge_list after an edge swap is perfomed on the graph
165 """
166
167 # need to use np.random here instead of the generator for numba compatibility
168 if seed is not None:
169 np.random.seed(seed)
170
171 # choose two indices at random
172 # NOTE: using np.random here for current numba compatibility
173 orig_inds = np.random.choice(len(edge_list), size=2, replace=False)
174
175 u, v = edge_list[orig_inds[0]]
176
177 # two types of swap orientations for undirected graph
178 if np.random.rand() < 0.5:
179 x, y = edge_list[orig_inds[1]]
180 else:
181 y, x = edge_list[orig_inds[1]]
182
183 # ensures no initial loops
184 if u == v or x == y:
185 return adjacency, edge_list
186
187 # ensures no loops after swap (must be swap on 4 distinct nodes)
188 if u == x or v == y:
189 return adjacency, edge_list
190
191 # save edge values
192 w_ux = adjacency[u, x]
193 w_vy = adjacency[v, y]
194
195 # ensures no multigraphs after swap
196 if w_ux >= 1 or w_vy >= 1:
197 return adjacency, edge_list
198
199 # perform the swap
200 adjacency[u, v] = 0
201 adjacency[v, u] = 0
202 adjacency[x, y] = 0
203 adjacency[y, x] = 0
204
205 adjacency[u, x] = 1
206 adjacency[x, u] = 1
207 adjacency[v, y] = 1
208 adjacency[y, v] = 1
209
210 # update edge list
211 edge_list[orig_inds[0]] = [u, x]
212 edge_list[orig_inds[1]] = [v, y]
213 return adjacency, edge_list
214
[end of graspologic/models/edge_swaps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/graspologic/models/edge_swaps.py b/graspologic/models/edge_swaps.py
--- a/graspologic/models/edge_swaps.py
+++ b/graspologic/models/edge_swaps.py
@@ -73,7 +73,7 @@
else:
# for numpy input, use numba for JIT compilation
# NOTE: not convinced numba is helping much here, look into optimizing
- self._edge_swap_function = nb.jit(_edge_swap)
+ self._edge_swap_function = _edge_swap_numba
self.adjacency = adjacency
@@ -211,3 +211,6 @@
edge_list[orig_inds[0]] = [u, x]
edge_list[orig_inds[1]] = [v, y]
return adjacency, edge_list
+
+
+_edge_swap_numba = nb.jit(_edge_swap)
| {"golden_diff": "diff --git a/graspologic/models/edge_swaps.py b/graspologic/models/edge_swaps.py\n--- a/graspologic/models/edge_swaps.py\n+++ b/graspologic/models/edge_swaps.py\n@@ -73,7 +73,7 @@\n else:\n # for numpy input, use numba for JIT compilation\n # NOTE: not convinced numba is helping much here, look into optimizing\n- self._edge_swap_function = nb.jit(_edge_swap)\n+ self._edge_swap_function = _edge_swap_numba\n \n self.adjacency = adjacency\n \n@@ -211,3 +211,6 @@\n edge_list[orig_inds[0]] = [u, x]\n edge_list[orig_inds[1]] = [v, y]\n return adjacency, edge_list\n+\n+\n+_edge_swap_numba = nb.jit(_edge_swap)\n", "issue": "Fix to repeated compilation of numba\n- [x] Does this PR have a descriptive title that could go in our release notes?\r\n- [ ] Does this PR add any new dependencies?\r\n- [ ] Does this PR modify any existing APIs?\r\n - [ ] Is the change to the API backwards compatible?\r\n- [ ] Have you built the documentation (reference and/or tutorial) and verified the generated documentation is appropriate?\r\n\r\nFixes #946\r\n\r\nUtilizes the addition of a global function at the top of the file that calls the nb.jit function a single time instead of in the constructor. This speeds up the compilation and running speed of the code.\n", "before_files": [{"content": "from typing import Optional\n\nimport numba as nb\nimport numpy as np\nfrom beartype import beartype\nfrom scipy.sparse import csr_matrix, lil_matrix\nfrom sklearn.utils import check_scalar\n\nfrom graspologic.preconditions import check_argument\nfrom graspologic.types import AdjacencyMatrix, Tuple\nfrom graspologic.utils import import_graph, is_loopless, is_symmetric, is_unweighted\n\n\n# Code based on: https://github.com/joelnish/double-edge-swap-mcmc/blob/master/dbl_edge_mcmc.py\nclass EdgeSwapper:\n \"\"\"\n Degree Preserving Edge Swaps\n\n This class allows for performing degree preserving edge swaps to\n generate new networks with the same degree sequence as the input network.\n\n Attributes\n ----------\n adjacency : np.ndarray OR csr_matrix, shape (n_verts, n_verts)\n The initial adjacency matrix to perform edge swaps on. Must be unweighted and undirected.\n\n edge_list : np.ndarray, shape (n_verts, 2)\n The corresponding edgelist for the input network\n\n seed: int, optional\n Random seed to make outputs reproducible, must be positive\n\n\n References\n ----------\n .. [1] Fosdick, B. K., Larremore, D. B., Nishimura, J., & Ugander, J. (2018).\n Configuring random graph models with fixed degree sequences.\n Siam Review, 60(2), 315-355.\n\n .. [2] Carstens, C. J., & Horadam, K. J. (2017).\n Switching edges to randomize networks: what goes wrong and how to fix it.\n Journal of Complex Networks, 5(3), 337-351.\n\n .. 
[3] https://github.com/joelnish/double-edge-swap-mcmc/blob/master/dbl_edge_mcmc.py\n \"\"\"\n\n @beartype\n def __init__(self, adjacency: AdjacencyMatrix, seed: Optional[int] = None):\n\n weight_check = is_unweighted(adjacency)\n check_argument(weight_check, \"adjacency must be unweighted\")\n\n loop_check = is_loopless(adjacency)\n check_argument(loop_check, \"adjacency cannot have loops\")\n\n direct_check = is_symmetric(adjacency)\n check_argument(direct_check, \"adjacency must be undirected\")\n\n max_seed = np.iinfo(np.uint32).max\n if seed is None:\n seed = np.random.randint(max_seed, dtype=np.int64)\n seed = check_scalar(\n seed, \"seed\", (int, np.integer), min_val=0, max_val=max_seed\n )\n self._rng = np.random.default_rng(seed)\n\n adjacency = import_graph(adjacency, copy=True)\n\n if isinstance(adjacency, csr_matrix):\n # more efficient for manipulations which change sparsity structure\n adjacency = lil_matrix(adjacency)\n self._edge_swap_function = _edge_swap\n else:\n # for numpy input, use numba for JIT compilation\n # NOTE: not convinced numba is helping much here, look into optimizing\n self._edge_swap_function = nb.jit(_edge_swap)\n\n self.adjacency = adjacency\n\n edge_list = self._do_setup()\n check_argument(len(edge_list) >= 2, \"there must be at least 2 edges\")\n self.edge_list = edge_list\n\n def _do_setup(self) -> np.ndarray:\n \"\"\"\n Computes the edge_list from the adjancency matrix\n\n Returns\n -------\n edge_list : np.ndarray, shape (n_verts, 2)\n The corresponding edge_list of adjacency\n \"\"\"\n\n # get edges for upper triangle of undirected graph\n row_inds, col_inds = np.nonzero(self.adjacency)\n upper = row_inds < col_inds\n row_inds = row_inds[upper]\n col_inds = col_inds[upper]\n edge_list = np.stack((row_inds, col_inds)).T\n return edge_list\n\n def swap_edges(self, n_swaps: int = 1) -> Tuple[AdjacencyMatrix, np.ndarray]:\n \"\"\"\n Performs a number of edge swaps on the graph\n\n Parameters\n ----------\n n_swaps : int (default 1), optional\n The number of edge swaps to be performed\n\n Returns\n -------\n adjacency : np.ndarray OR csr.matrix, shape (n_verts, n_verts)\n The adjancency matrix after a number of edge swaps are performed on the graph\n\n edge_list : np.ndarray (n_verts, 2)\n The edge_list after a number of edge swaps are perfomed on the graph\n \"\"\"\n\n # Note: for some reason could not get reproducibility w/o setting seed\n # inside of the _edge_swap_function itself\n max_seed = np.iinfo(np.int32).max\n for _ in range(n_swaps):\n self.adjacency, self.edge_list = self._edge_swap_function(\n self.adjacency,\n self.edge_list,\n seed=self._rng.integers(max_seed),\n )\n\n adjacency = self.adjacency\n if isinstance(adjacency, lil_matrix):\n adjacency = csr_matrix(adjacency)\n else:\n adjacency = adjacency.copy()\n\n return adjacency, self.edge_list.copy()\n\n\ndef _edge_swap(\n adjacency: AdjacencyMatrix, edge_list: np.ndarray, seed: Optional[int] = None\n) -> Tuple[AdjacencyMatrix, np.ndarray]:\n \"\"\"\n Performs the edge swap on the adjacency matrix. 
If adjacency is\n np.ndarray, then nopython=True is used in numba, but if adjacency\n is csr_matrix, then forceobj=True is used in numba\n\n Parameters\n ----------\n adjacency : np.ndarray OR csr_matrix, shape (n_verts, n_verts)\n The initial adjacency matrix in which edge swaps are performed on it\n\n edge_list : np.ndarray, shape (n_verts, 2)\n The corresponding edge_list of adjacency\n\n seed: int, optional\n Random seed to make outputs reproducible, must be positive\n\n Returns\n -------\n adjacency : np.ndarray OR csr_matrix, shape (n_verts, n_verts)\n The adjancency matrix after an edge swap is performed on the graph\n\n edge_list : np.ndarray (n_verts, 2)\n The edge_list after an edge swap is perfomed on the graph\n \"\"\"\n\n # need to use np.random here instead of the generator for numba compatibility\n if seed is not None:\n np.random.seed(seed)\n\n # choose two indices at random\n # NOTE: using np.random here for current numba compatibility\n orig_inds = np.random.choice(len(edge_list), size=2, replace=False)\n\n u, v = edge_list[orig_inds[0]]\n\n # two types of swap orientations for undirected graph\n if np.random.rand() < 0.5:\n x, y = edge_list[orig_inds[1]]\n else:\n y, x = edge_list[orig_inds[1]]\n\n # ensures no initial loops\n if u == v or x == y:\n return adjacency, edge_list\n\n # ensures no loops after swap (must be swap on 4 distinct nodes)\n if u == x or v == y:\n return adjacency, edge_list\n\n # save edge values\n w_ux = adjacency[u, x]\n w_vy = adjacency[v, y]\n\n # ensures no multigraphs after swap\n if w_ux >= 1 or w_vy >= 1:\n return adjacency, edge_list\n\n # perform the swap\n adjacency[u, v] = 0\n adjacency[v, u] = 0\n adjacency[x, y] = 0\n adjacency[y, x] = 0\n\n adjacency[u, x] = 1\n adjacency[x, u] = 1\n adjacency[v, y] = 1\n adjacency[y, v] = 1\n\n # update edge list\n edge_list[orig_inds[0]] = [u, x]\n edge_list[orig_inds[1]] = [v, y]\n return adjacency, edge_list\n", "path": "graspologic/models/edge_swaps.py"}]} | 3,002 | 195 |
gh_patches_debug_36010 | rasdani/github-patches | git_diff | cal-itp__benefits-396 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Track sign in status in user session
## Background
Once the user successfully signs in to their Login.gov account, we will receive a response from the authentication server indicating their status. We can store the fact that the user is then "signed in" in their session using the existing [`session` framework](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py), which keeps everything local to the user in secure cookies.
This task is just about setting up the session code to be able to track this, but not actually getting the status in there (see #373 for that).
Check out #321 / #330 as an example of a similar addition last Sprint.
## Tasks
- [x] Add a [new key `_AUTH = "auth"`](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py#L26) for use elsewhere
- [x] Create a new function `auth()` that uses the key to get the stored `bool` indicating whether the user is signed in (via Login.gov) or not
- [x] Update the [`update()`](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py#L192) function to accept a new kwarg `auth=False`; if it's a bool, store it in the request's session using the key
- [x] Update the [`reset()`](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py#L134) function to set the key to `False` by default
- [x] Update the [`context_dict()`](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py#L52) function to add the new key/value - this shows up in the debug bar
</issue>
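Taken together, these tasks boil down to one new session key plus a small getter, threaded through `reset()`, `update()`, and `context_dict()`. A minimal sketch of what that could look like (following the dict-style `request.session` storage and key-constant conventions already used in `benefits/core/session.py`; this is a sketch, not the final patch):

```python
_AUTH = "auth"


def auth(request):
    """Get the signed-in flag from the request's session, or None."""
    return request.session.get(_AUTH)


# in reset(request): start every session signed out
#     request.session[_AUTH] = False
#
# in update(request, ..., auth=None, ...): only persist real booleans
#     if auth is not None and isinstance(auth, bool):
#         request.session[_AUTH] = auth
#
# in context_dict(request): expose the flag to the debug bar
#     _AUTH: auth(request),
```

The existing `_DEBUG` flag in the same module is handled the same way and is a useful template for the new key.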
<code>
[start of benefits/core/session.py]
1 """
2 The core application: helpers to work with request sessions.
3 """
4 import hashlib
5 import logging
6 import time
7 import uuid
8
9 from django.urls import reverse
10
11 from benefits.settings import RATE_LIMIT_PERIOD
12 from . import models
13
14
15 logger = logging.getLogger(__name__)
16
17
18 _AGENCY = "agency"
19 _DEBUG = "debug"
20 _DID = "did"
21 _ELIGIBILITY = "eligibility"
22 _LANG = "lang"
23 _LIMITCOUNTER = "limitcounter"
24 _LIMITUNTIL = "limituntil"
25 _ORIGIN = "origin"
26 _START = "start"
27 _UID = "uid"
28 _VERIFIER = "verifier"
29
30 # ignore bandit B105:hardcoded_password_string
31 # as these are not passwords, but keys for the session dict
32 _TOKEN = "token" # nosec
33 _TOKEN_EXP = "token_exp" # nosec
34
35
36 def agency(request):
37 """Get the agency from the request's session, or None"""
38 logger.debug("Get session agency")
39 try:
40 return models.TransitAgency.by_id(request.session[_AGENCY])
41 except (KeyError, models.TransitAgency.DoesNotExist):
42 logger.debug("Can't get agency from session")
43 return None
44
45
46 def active_agency(request):
47 """True if the request's session is configured with an active agency. False otherwise."""
48 logger.debug("Get session active agency flag")
49 a = agency(request)
50 return a and a.active
51
52
53 def context_dict(request):
54 """The request's session context as a dict."""
55 logger.debug("Get session context dict")
56 return {
57 _AGENCY: agency(request).slug if active_agency(request) else None,
58 _LIMITCOUNTER: rate_limit_counter(request),
59 _DEBUG: debug(request),
60 _DID: did(request),
61 _ELIGIBILITY: eligibility(request),
62 _LANG: language(request),
63 _ORIGIN: origin(request),
64 _LIMITUNTIL: rate_limit_time(request),
65 _START: start(request),
66 _TOKEN: token(request),
67 _TOKEN_EXP: token_expiry(request),
68 _UID: uid(request),
69 _VERIFIER: verifier(request),
70 }
71
72
73 def debug(request):
74 """Get the DEBUG flag from the request's session."""
75 logger.debug("Get session debug flag")
76 return bool(request.session.get(_DEBUG, False))
77
78
79 def did(request):
80 """Get the session's device ID, a hashed version of the unique ID."""
81 logger.debug("Get session did")
82 d = request.session.get(_DID)
83 if not d:
84 reset(request)
85 d = request.session.get(_DID)
86 return str(d)
87
88
89 def eligibility(request):
90 """Get the confirmed models.EligibilityType from the request's session, or None"""
91 logger.debug("Get session confirmed eligibility")
92 eligibility = request.session.get(_ELIGIBILITY)
93 if eligibility:
94 return models.EligibilityType.get(eligibility)
95 else:
96 return None
97
98
99 def eligible(request):
100 """True if the request's session is configured with an active agency and has confirmed eligibility. False otherwise."""
101 logger.debug("Get session eligible flag")
102 return active_agency(request) and agency(request).supports_type(eligibility(request))
103
104
105 def increment_rate_limit_counter(request):
106 """Adds 1 to this session's rate limit counter."""
107 logger.debug("Increment rate limit counter")
108 c = rate_limit_counter(request)
109 request.session[_LIMITCOUNTER] = int(c) + 1
110
111
112 def language(request):
113 """Get the language configured for the request."""
114 logger.debug("Get session language")
115 return request.LANGUAGE_CODE
116
117
118 def origin(request):
119 """Get the origin for the request's session, or None."""
120 logger.debug("Get session origin")
121 return request.session.get(_ORIGIN)
122
123
124 def rate_limit_counter(request):
125 """Get this session's rate limit counter."""
126 logger.debug("Get rate limit counter")
127 return request.session.get(_LIMITCOUNTER)
128
129
130 def rate_limit_time(request):
131 """Get this session's rate limit time, a Unix timestamp after which the session's rate limt resets."""
132 logger.debug("Get rate limit time")
133 return request.session.get(_LIMITUNTIL)
134
135
136 def reset(request):
137 """Reset the session for the request."""
138 logger.debug("Reset session")
139 request.session[_AGENCY] = None
140 request.session[_ELIGIBILITY] = None
141 request.session[_ORIGIN] = reverse("core:index")
142 request.session[_TOKEN] = None
143 request.session[_TOKEN_EXP] = None
144 request.session[_VERIFIER] = None
145
146 if _UID not in request.session or not request.session[_UID]:
147 logger.debug("Reset session time and uid")
148 request.session[_START] = int(time.time() * 1000)
149 u = str(uuid.uuid4())
150 request.session[_UID] = u
151 request.session[_DID] = str(uuid.UUID(hashlib.sha512(bytes(u, "utf8")).hexdigest()[:32]))
152 reset_rate_limit(request)
153
154
155 def reset_rate_limit(request):
156 """Reset this session's rate limit counter and time."""
157 logger.debug("Reset rate limit")
158 request.session[_LIMITCOUNTER] = 0
159 # get the current time in Unix seconds, then add RATE_LIMIT_PERIOD seconds
160 request.session[_LIMITUNTIL] = int(time.time()) + RATE_LIMIT_PERIOD
161
162
163 def start(request):
164 """Get the start time from the request's session, as integer milliseconds since Epoch."""
165 logger.debug("Get session time")
166 s = request.session.get(_START)
167 if not s:
168 reset(request)
169 s = request.session.get(_START)
170 return s
171
172
173 def token(request):
174 """Get the token from the request's session, or None."""
175 logger.debug("Get session token")
176 return request.session.get(_TOKEN)
177
178
179 def token_expiry(request):
180 """Get the token's expiry time from the request's session, or None."""
181 logger.debug("Get session token expiry")
182 return request.session.get(_TOKEN_EXP)
183
184
185 def uid(request):
186 """Get the session's unique ID, generating a new one if necessary."""
187 logger.debug("Get session uid")
188 u = request.session.get(_UID)
189 if not u:
190 reset(request)
191 u = request.session.get(_UID)
192 return u
193
194
195 def update(request, agency=None, debug=None, eligibility_types=None, origin=None, token=None, token_exp=None, verifier=None):
196 """Update the request's session with non-null values."""
197 if agency is not None and isinstance(agency, models.TransitAgency):
198 logger.debug(f"Update session {_AGENCY}")
199 request.session[_AGENCY] = agency.id
200 if debug is not None:
201 logger.debug(f"Update session {_DEBUG}")
202 request.session[_DEBUG] = debug
203 if eligibility_types is not None and isinstance(eligibility_types, list):
204 logger.debug(f"Update session {_ELIGIBILITY}")
205 if len(eligibility_types) > 1:
206 raise NotImplementedError("Multiple eligibilities are not supported at this time.")
207 elif len(eligibility_types) == 1:
208 # get the eligibility corresponding to the session's agency
209 a = models.TransitAgency.by_id(request.session[_AGENCY])
210 t = str(eligibility_types[0]).strip()
211 request.session[_ELIGIBILITY] = a.get_type_id(t)
212 else:
213 # empty list, clear session eligibility
214 request.session[_ELIGIBILITY] = None
215 if origin is not None:
216 logger.debug(f"Update session {_ORIGIN}")
217 request.session[_ORIGIN] = origin
218 if token is not None:
219 logger.debug(f"Update session {_TOKEN}")
220 request.session[_TOKEN] = token
221 request.session[_TOKEN_EXP] = token_exp
222 if verifier is not None and isinstance(verifier, models.EligibilityVerifier):
223 logger.debug(f"Update session {_VERIFIER}")
224 request.session[_VERIFIER] = verifier.id
225
226
227 def valid_token(request):
228 """True if the request's session is configured with a valid token. False otherwise."""
229 if token(request) is not None:
230 logger.debug("Session contains a token")
231 exp = token_expiry(request)
232
233 # ensure token does not expire in the next 5 seconds
234 valid = exp is None or exp > (time.time() + 5)
235
236 logger.debug(f"Session token is {'valid' if valid else 'expired'}")
237 return valid
238 else:
239 logger.debug("Session does not contain a valid token")
240 return False
241
242
243 def verifier(request):
244 """Get the verifier from the request's session, or None"""
245 logger.debug("Get session verifier")
246 try:
247 return models.EligibilityVerifier.by_id(request.session[_VERIFIER])
248 except (KeyError, models.EligibilityVerifier.DoesNotExist):
249 logger.debug("Can't get verifier from session")
250 return None
251
[end of benefits/core/session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/core/session.py b/benefits/core/session.py
--- a/benefits/core/session.py
+++ b/benefits/core/session.py
@@ -16,6 +16,7 @@
_AGENCY = "agency"
+_AUTH = "auth"
_DEBUG = "debug"
_DID = "did"
_ELIGIBILITY = "eligibility"
@@ -50,11 +51,18 @@
return a and a.active
+def auth(request):
+ """Get the auth from the request's session, or None"""
+ logger.debug("Get session auth")
+ return request.session.get(_AUTH)
+
+
def context_dict(request):
"""The request's session context as a dict."""
logger.debug("Get session context dict")
return {
_AGENCY: agency(request).slug if active_agency(request) else None,
+ _AUTH: auth(request),
_LIMITCOUNTER: rate_limit_counter(request),
_DEBUG: debug(request),
_DID: did(request),
@@ -137,6 +145,7 @@
"""Reset the session for the request."""
logger.debug("Reset session")
request.session[_AGENCY] = None
+ request.session[_AUTH] = False
request.session[_ELIGIBILITY] = None
request.session[_ORIGIN] = reverse("core:index")
request.session[_TOKEN] = None
@@ -192,11 +201,24 @@
return u
-def update(request, agency=None, debug=None, eligibility_types=None, origin=None, token=None, token_exp=None, verifier=None):
+def update(
+ request,
+ agency=None,
+ auth=None,
+ debug=None,
+ eligibility_types=None,
+ origin=None,
+ token=None,
+ token_exp=None,
+ verifier=None,
+):
"""Update the request's session with non-null values."""
if agency is not None and isinstance(agency, models.TransitAgency):
logger.debug(f"Update session {_AGENCY}")
request.session[_AGENCY] = agency.id
+ if auth is not None and type(auth) == bool:
+ logger.debug(f"Update session {_AUTH}")
+ request.session[_AUTH] = auth
if debug is not None:
logger.debug(f"Update session {_DEBUG}")
request.session[_DEBUG] = debug
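As a usage note (hypothetical caller code, not part of this patch), once the keyword exists the view that completes the Login.gov flow only needs to set and later read the flag:

```python
# hypothetical view code, not part of this patch
from benefits.core import session

def login_complete(request):
    session.update(request, auth=True)   # record the successful sign-in
    return session.auth(request)         # subsequent reads return True
```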
| {"golden_diff": "diff --git a/benefits/core/session.py b/benefits/core/session.py\n--- a/benefits/core/session.py\n+++ b/benefits/core/session.py\n@@ -16,6 +16,7 @@\n \n \n _AGENCY = \"agency\"\n+_AUTH = \"auth\"\n _DEBUG = \"debug\"\n _DID = \"did\"\n _ELIGIBILITY = \"eligibility\"\n@@ -50,11 +51,18 @@\n return a and a.active\n \n \n+def auth(request):\n+ \"\"\"Get the auth from the request's session, or None\"\"\"\n+ logger.debug(\"Get session auth\")\n+ return request.session.get(_AUTH)\n+\n+\n def context_dict(request):\n \"\"\"The request's session context as a dict.\"\"\"\n logger.debug(\"Get session context dict\")\n return {\n _AGENCY: agency(request).slug if active_agency(request) else None,\n+ _AUTH: auth(request),\n _LIMITCOUNTER: rate_limit_counter(request),\n _DEBUG: debug(request),\n _DID: did(request),\n@@ -137,6 +145,7 @@\n \"\"\"Reset the session for the request.\"\"\"\n logger.debug(\"Reset session\")\n request.session[_AGENCY] = None\n+ request.session[_AUTH] = False\n request.session[_ELIGIBILITY] = None\n request.session[_ORIGIN] = reverse(\"core:index\")\n request.session[_TOKEN] = None\n@@ -192,11 +201,24 @@\n return u\n \n \n-def update(request, agency=None, debug=None, eligibility_types=None, origin=None, token=None, token_exp=None, verifier=None):\n+def update(\n+ request,\n+ agency=None,\n+ auth=None,\n+ debug=None,\n+ eligibility_types=None,\n+ origin=None,\n+ token=None,\n+ token_exp=None,\n+ verifier=None,\n+):\n \"\"\"Update the request's session with non-null values.\"\"\"\n if agency is not None and isinstance(agency, models.TransitAgency):\n logger.debug(f\"Update session {_AGENCY}\")\n request.session[_AGENCY] = agency.id\n+ if auth is not None and type(auth) == bool:\n+ logger.debug(f\"Update session {_AUTH}\")\n+ request.session[_AUTH] = auth\n if debug is not None:\n logger.debug(f\"Update session {_DEBUG}\")\n request.session[_DEBUG] = debug\n", "issue": "Track sign in status in user session\n## Background\r\n\r\nOnce the user successfully signs in to their Login.gov account, we will receive a response from the authentication server indicating their status. 
We can store the fact the the user is then \"signed in\" in their session using the existing [`session` framework](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py) which keeps everything local to the user in secure cookies.\r\n\r\nThis task is just about setting up the session code to be able to track this, but not actually getting the status in there (see #373 for that).\r\n\r\nCheck out #321 / #330 as an example of a similar addition last Sprint.\r\n\r\n## Tasks\r\n\r\n- [x] Add a [new key `_AUTH = \"auth\"`](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py#L26) for use elsewhere\r\n- [x] Create a new function `auth()` that uses the key to get the stored `bool` corresponding to whether the user is signed in via or not\r\n- [x] Update the [`update()`](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py#L192) function to accept a new kwarg `auth=False`; if it's a bool, store in the request's session using the key\r\n- [x] Update the [`reset()`](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py#L134) function to set the key to `False` by default\r\n- [x] Update the [`context_dict()`](https://github.com/cal-itp/benefits/blob/dev/benefits/core/session.py#L52) function to add the new key/value - this shows up in the debug bar\n", "before_files": [{"content": "\"\"\"\nThe core application: helpers to work with request sessions.\n\"\"\"\nimport hashlib\nimport logging\nimport time\nimport uuid\n\nfrom django.urls import reverse\n\nfrom benefits.settings import RATE_LIMIT_PERIOD\nfrom . import models\n\n\nlogger = logging.getLogger(__name__)\n\n\n_AGENCY = \"agency\"\n_DEBUG = \"debug\"\n_DID = \"did\"\n_ELIGIBILITY = \"eligibility\"\n_LANG = \"lang\"\n_LIMITCOUNTER = \"limitcounter\"\n_LIMITUNTIL = \"limituntil\"\n_ORIGIN = \"origin\"\n_START = \"start\"\n_UID = \"uid\"\n_VERIFIER = \"verifier\"\n\n# ignore bandit B105:hardcoded_password_string\n# as these are not passwords, but keys for the session dict\n_TOKEN = \"token\" # nosec\n_TOKEN_EXP = \"token_exp\" # nosec\n\n\ndef agency(request):\n \"\"\"Get the agency from the request's session, or None\"\"\"\n logger.debug(\"Get session agency\")\n try:\n return models.TransitAgency.by_id(request.session[_AGENCY])\n except (KeyError, models.TransitAgency.DoesNotExist):\n logger.debug(\"Can't get agency from session\")\n return None\n\n\ndef active_agency(request):\n \"\"\"True if the request's session is configured with an active agency. 
False otherwise.\"\"\"\n logger.debug(\"Get session active agency flag\")\n a = agency(request)\n return a and a.active\n\n\ndef context_dict(request):\n \"\"\"The request's session context as a dict.\"\"\"\n logger.debug(\"Get session context dict\")\n return {\n _AGENCY: agency(request).slug if active_agency(request) else None,\n _LIMITCOUNTER: rate_limit_counter(request),\n _DEBUG: debug(request),\n _DID: did(request),\n _ELIGIBILITY: eligibility(request),\n _LANG: language(request),\n _ORIGIN: origin(request),\n _LIMITUNTIL: rate_limit_time(request),\n _START: start(request),\n _TOKEN: token(request),\n _TOKEN_EXP: token_expiry(request),\n _UID: uid(request),\n _VERIFIER: verifier(request),\n }\n\n\ndef debug(request):\n \"\"\"Get the DEBUG flag from the request's session.\"\"\"\n logger.debug(\"Get session debug flag\")\n return bool(request.session.get(_DEBUG, False))\n\n\ndef did(request):\n \"\"\"Get the session's device ID, a hashed version of the unique ID.\"\"\"\n logger.debug(\"Get session did\")\n d = request.session.get(_DID)\n if not d:\n reset(request)\n d = request.session.get(_DID)\n return str(d)\n\n\ndef eligibility(request):\n \"\"\"Get the confirmed models.EligibilityType from the request's session, or None\"\"\"\n logger.debug(\"Get session confirmed eligibility\")\n eligibility = request.session.get(_ELIGIBILITY)\n if eligibility:\n return models.EligibilityType.get(eligibility)\n else:\n return None\n\n\ndef eligible(request):\n \"\"\"True if the request's session is configured with an active agency and has confirmed eligibility. False otherwise.\"\"\"\n logger.debug(\"Get session eligible flag\")\n return active_agency(request) and agency(request).supports_type(eligibility(request))\n\n\ndef increment_rate_limit_counter(request):\n \"\"\"Adds 1 to this session's rate limit counter.\"\"\"\n logger.debug(\"Increment rate limit counter\")\n c = rate_limit_counter(request)\n request.session[_LIMITCOUNTER] = int(c) + 1\n\n\ndef language(request):\n \"\"\"Get the language configured for the request.\"\"\"\n logger.debug(\"Get session language\")\n return request.LANGUAGE_CODE\n\n\ndef origin(request):\n \"\"\"Get the origin for the request's session, or None.\"\"\"\n logger.debug(\"Get session origin\")\n return request.session.get(_ORIGIN)\n\n\ndef rate_limit_counter(request):\n \"\"\"Get this session's rate limit counter.\"\"\"\n logger.debug(\"Get rate limit counter\")\n return request.session.get(_LIMITCOUNTER)\n\n\ndef rate_limit_time(request):\n \"\"\"Get this session's rate limit time, a Unix timestamp after which the session's rate limt resets.\"\"\"\n logger.debug(\"Get rate limit time\")\n return request.session.get(_LIMITUNTIL)\n\n\ndef reset(request):\n \"\"\"Reset the session for the request.\"\"\"\n logger.debug(\"Reset session\")\n request.session[_AGENCY] = None\n request.session[_ELIGIBILITY] = None\n request.session[_ORIGIN] = reverse(\"core:index\")\n request.session[_TOKEN] = None\n request.session[_TOKEN_EXP] = None\n request.session[_VERIFIER] = None\n\n if _UID not in request.session or not request.session[_UID]:\n logger.debug(\"Reset session time and uid\")\n request.session[_START] = int(time.time() * 1000)\n u = str(uuid.uuid4())\n request.session[_UID] = u\n request.session[_DID] = str(uuid.UUID(hashlib.sha512(bytes(u, \"utf8\")).hexdigest()[:32]))\n reset_rate_limit(request)\n\n\ndef reset_rate_limit(request):\n \"\"\"Reset this session's rate limit counter and time.\"\"\"\n logger.debug(\"Reset rate limit\")\n request.session[_LIMITCOUNTER] 
= 0\n # get the current time in Unix seconds, then add RATE_LIMIT_PERIOD seconds\n request.session[_LIMITUNTIL] = int(time.time()) + RATE_LIMIT_PERIOD\n\n\ndef start(request):\n \"\"\"Get the start time from the request's session, as integer milliseconds since Epoch.\"\"\"\n logger.debug(\"Get session time\")\n s = request.session.get(_START)\n if not s:\n reset(request)\n s = request.session.get(_START)\n return s\n\n\ndef token(request):\n \"\"\"Get the token from the request's session, or None.\"\"\"\n logger.debug(\"Get session token\")\n return request.session.get(_TOKEN)\n\n\ndef token_expiry(request):\n \"\"\"Get the token's expiry time from the request's session, or None.\"\"\"\n logger.debug(\"Get session token expiry\")\n return request.session.get(_TOKEN_EXP)\n\n\ndef uid(request):\n \"\"\"Get the session's unique ID, generating a new one if necessary.\"\"\"\n logger.debug(\"Get session uid\")\n u = request.session.get(_UID)\n if not u:\n reset(request)\n u = request.session.get(_UID)\n return u\n\n\ndef update(request, agency=None, debug=None, eligibility_types=None, origin=None, token=None, token_exp=None, verifier=None):\n \"\"\"Update the request's session with non-null values.\"\"\"\n if agency is not None and isinstance(agency, models.TransitAgency):\n logger.debug(f\"Update session {_AGENCY}\")\n request.session[_AGENCY] = agency.id\n if debug is not None:\n logger.debug(f\"Update session {_DEBUG}\")\n request.session[_DEBUG] = debug\n if eligibility_types is not None and isinstance(eligibility_types, list):\n logger.debug(f\"Update session {_ELIGIBILITY}\")\n if len(eligibility_types) > 1:\n raise NotImplementedError(\"Multiple eligibilities are not supported at this time.\")\n elif len(eligibility_types) == 1:\n # get the eligibility corresponding to the session's agency\n a = models.TransitAgency.by_id(request.session[_AGENCY])\n t = str(eligibility_types[0]).strip()\n request.session[_ELIGIBILITY] = a.get_type_id(t)\n else:\n # empty list, clear session eligibility\n request.session[_ELIGIBILITY] = None\n if origin is not None:\n logger.debug(f\"Update session {_ORIGIN}\")\n request.session[_ORIGIN] = origin\n if token is not None:\n logger.debug(f\"Update session {_TOKEN}\")\n request.session[_TOKEN] = token\n request.session[_TOKEN_EXP] = token_exp\n if verifier is not None and isinstance(verifier, models.EligibilityVerifier):\n logger.debug(f\"Update session {_VERIFIER}\")\n request.session[_VERIFIER] = verifier.id\n\n\ndef valid_token(request):\n \"\"\"True if the request's session is configured with a valid token. False otherwise.\"\"\"\n if token(request) is not None:\n logger.debug(\"Session contains a token\")\n exp = token_expiry(request)\n\n # ensure token does not expire in the next 5 seconds\n valid = exp is None or exp > (time.time() + 5)\n\n logger.debug(f\"Session token is {'valid' if valid else 'expired'}\")\n return valid\n else:\n logger.debug(\"Session does not contain a valid token\")\n return False\n\n\ndef verifier(request):\n \"\"\"Get the verifier from the request's session, or None\"\"\"\n logger.debug(\"Get session verifier\")\n try:\n return models.EligibilityVerifier.by_id(request.session[_VERIFIER])\n except (KeyError, models.EligibilityVerifier.DoesNotExist):\n logger.debug(\"Can't get verifier from session\")\n return None\n", "path": "benefits/core/session.py"}]} | 3,468 | 520 |
gh_patches_debug_36715 | rasdani/github-patches | git_diff | Mailu__Mailu-1968 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Insufficient input validation in /internal endpoint
It seems it is possible to crash a Flask thread by sending an invalid UTF-8 username.
As far as I can see, this cannot be used to gain access (though this should be validated).
Here are logs to reproduce this.
[error] 56#56: *1312970 auth http server 127.0.0.1:8000 did not send server or port while in http auth state, client: xx.xx.xx.xx, server: 0.0.0.0:25, login: "xxxxxxxxx\[email protected]"
GET /internal/auth/email HTTP/1.0" 500 290 "-" "-"
ERROR in app: Exception on /internal/auth/email [GET]
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/usr/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python3.7/site-packages/flask_limiter/extension.py", line 544, in __inner
return obj(*a, **k)
File "/app/mailu/internal/views/auth.py", line 18, in nginx_authentication
headers = nginx.handle_authentication(flask.request.headers)
File "/app/mailu/internal/nginx.py", line 45, in handle_authentication
user_email = raw_user_email.encode("iso8859-1").decode("utf8")
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xed in position 6: invalid continuation byte
xx.xx.xx.xx - - [xx/xx/xxxx:xx:xx:xx +0000] "GET /internal/auth/email HTTP/1.0" 500 290 "-" "-"
</issue>
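The 500 comes from re-decoding the ISO-8859-1 header value as UTF-8 with no guard around it. A small illustrative repro plus a defensive variant (the helper name below is invented for this sketch and is not Mailu's actual API) could look like:

```python
import urllib.parse

# repro of the traceback above: 0xED followed by an ASCII byte is not a
# valid UTF-8 continuation, so the decode raises UnicodeDecodeError
# "abc\xedxyz".encode("iso8859-1").decode("utf8")

def decode_nginx_credentials(headers):
    """Sketch of a guarded decode for the Auth-User / Auth-Pass headers."""
    raw_user = urllib.parse.unquote(headers["Auth-User"])
    raw_password = urllib.parse.unquote(headers["Auth-Pass"])
    try:
        user = raw_user.encode("iso8859-1").decode("utf8")
        password = raw_password.encode("iso8859-1").decode("utf8")
    except UnicodeError:
        # invalid byte sequence: report a normal auth failure instead of crashing
        return None
    return user, password
```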
<code>
[start of core/admin/mailu/internal/nginx.py]
1 from mailu import models
2 from flask import current_app as app
3
4 import re
5 import urllib
6 import ipaddress
7 import socket
8 import tenacity
9
10 SUPPORTED_AUTH_METHODS = ["none", "plain"]
11
12
13 STATUSES = {
14 "authentication": ("Authentication credentials invalid", {
15 "imap": "AUTHENTICATIONFAILED",
16 "smtp": "535 5.7.8",
17 "pop3": "-ERR Authentication failed"
18 }),
19 "encryption": ("Must issue a STARTTLS command first", {
20 "smtp": "530 5.7.0"
21 }),
22 }
23
24 def check_credentials(user, password, ip, protocol=None):
25 if not user or not user.enabled or (protocol == "imap" and not user.enable_imap) or (protocol == "pop3" and not user.enable_pop):
26 return False
27 is_ok = False
28 # webmails
29 if len(password) == 64 and ip == app.config['WEBMAIL_ADDRESS']:
30 if user.verify_temp_token(password):
31 is_ok = True
32 # All tokens are 32 characters hex lowercase
33 if not is_ok and len(password) == 32:
34 for token in user.tokens:
35 if (token.check_password(password) and
36 (not token.ip or token.ip == ip)):
37 is_ok = True
38 break
39 if not is_ok and user.check_password(password):
40 is_ok = True
41 return is_ok
42
43 def handle_authentication(headers):
44 """ Handle an HTTP nginx authentication request
45 See: http://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol
46 """
47 method = headers["Auth-Method"]
48 protocol = headers["Auth-Protocol"]
49 # Incoming mail, no authentication
50 if method == "none" and protocol == "smtp":
51 server, port = get_server(protocol, False)
52 if app.config["INBOUND_TLS_ENFORCE"]:
53 if "Auth-SSL" in headers and headers["Auth-SSL"] == "on":
54 return {
55 "Auth-Status": "OK",
56 "Auth-Server": server,
57 "Auth-Port": port
58 }
59 else:
60 status, code = get_status(protocol, "encryption")
61 return {
62 "Auth-Status": status,
63 "Auth-Error-Code" : code,
64 "Auth-Wait": 0
65 }
66 else:
67 return {
68 "Auth-Status": "OK",
69 "Auth-Server": server,
70 "Auth-Port": port
71 }
72 # Authenticated user
73 elif method == "plain":
74 server, port = get_server(headers["Auth-Protocol"], True)
75 # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should
76 # be ASCII and are generally considered ISO8859-1. However when passing
77 # the password, nginx does not transcode the input UTF string, thus
78 # we need to manually decode.
79 raw_user_email = urllib.parse.unquote(headers["Auth-User"])
80 user_email = raw_user_email.encode("iso8859-1").decode("utf8")
81 raw_password = urllib.parse.unquote(headers["Auth-Pass"])
82 password = raw_password.encode("iso8859-1").decode("utf8")
83 ip = urllib.parse.unquote(headers["Client-Ip"])
84 service_port = int(urllib.parse.unquote(headers["Auth-Port"]))
85 if service_port == 25:
86 return {
87 "Auth-Status": "AUTH not supported",
88 "Auth-Error-Code": "502 5.5.1",
89 "Auth-Wait": 0
90 }
91 user = models.User.query.get(user_email)
92 if check_credentials(user, password, ip, protocol):
93 return {
94 "Auth-Status": "OK",
95 "Auth-Server": server,
96 "Auth-Port": port
97 }
98 else:
99 status, code = get_status(protocol, "authentication")
100 return {
101 "Auth-Status": status,
102 "Auth-Error-Code": code,
103 "Auth-Wait": 0
104 }
105 # Unexpected
106 return {}
107
108
109 def get_status(protocol, status):
110 """ Return the proper error code depending on the protocol
111 """
112 status, codes = STATUSES[status]
113 return status, codes[protocol]
114
115 def extract_host_port(host_and_port, default_port):
116 host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()
117 return host, int(port) if port else default_port
118
119 def get_server(protocol, authenticated=False):
120 if protocol == "imap":
121 hostname, port = extract_host_port(app.config['IMAP_ADDRESS'], 143)
122 elif protocol == "pop3":
123 hostname, port = extract_host_port(app.config['POP3_ADDRESS'], 110)
124 elif protocol == "smtp":
125 if authenticated:
126 hostname, port = extract_host_port(app.config['AUTHSMTP_ADDRESS'], 10025)
127 else:
128 hostname, port = extract_host_port(app.config['SMTP_ADDRESS'], 25)
129 try:
130 # test if hostname is already resolved to an ip adddress
131 ipaddress.ip_address(hostname)
132 except:
133 # hostname is not an ip address - so we need to resolve it
134 hostname = resolve_hostname(hostname)
135 return hostname, port
136
137 @tenacity.retry(stop=tenacity.stop_after_attempt(100),
138 wait=tenacity.wait_random(min=2, max=5))
139 def resolve_hostname(hostname):
140 """ This function uses system DNS to resolve a hostname.
141 It is capable of retrying in case the host is not immediately available
142 """
143 return socket.gethostbyname(hostname)
144
[end of core/admin/mailu/internal/nginx.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py
--- a/core/admin/mailu/internal/nginx.py
+++ b/core/admin/mailu/internal/nginx.py
@@ -71,16 +71,6 @@
}
# Authenticated user
elif method == "plain":
- server, port = get_server(headers["Auth-Protocol"], True)
- # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should
- # be ASCII and are generally considered ISO8859-1. However when passing
- # the password, nginx does not transcode the input UTF string, thus
- # we need to manually decode.
- raw_user_email = urllib.parse.unquote(headers["Auth-User"])
- user_email = raw_user_email.encode("iso8859-1").decode("utf8")
- raw_password = urllib.parse.unquote(headers["Auth-Pass"])
- password = raw_password.encode("iso8859-1").decode("utf8")
- ip = urllib.parse.unquote(headers["Client-Ip"])
service_port = int(urllib.parse.unquote(headers["Auth-Port"]))
if service_port == 25:
return {
@@ -88,20 +78,33 @@
"Auth-Error-Code": "502 5.5.1",
"Auth-Wait": 0
}
- user = models.User.query.get(user_email)
- if check_credentials(user, password, ip, protocol):
- return {
- "Auth-Status": "OK",
- "Auth-Server": server,
- "Auth-Port": port
- }
+ # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should
+ # be ASCII and are generally considered ISO8859-1. However when passing
+ # the password, nginx does not transcode the input UTF string, thus
+ # we need to manually decode.
+ raw_user_email = urllib.parse.unquote(headers["Auth-User"])
+ raw_password = urllib.parse.unquote(headers["Auth-Pass"])
+ try:
+ user_email = raw_user_email.encode("iso8859-1").decode("utf8")
+ password = raw_password.encode("iso8859-1").decode("utf8")
+ except:
+ app.logger.warn(f'Received undecodable user/password from nginx: {raw_user_email!r}/{raw_password!r}')
else:
- status, code = get_status(protocol, "authentication")
- return {
- "Auth-Status": status,
- "Auth-Error-Code": code,
- "Auth-Wait": 0
- }
+ user = models.User.query.get(user_email)
+ ip = urllib.parse.unquote(headers["Client-Ip"])
+ if check_credentials(user, password, ip, protocol):
+ server, port = get_server(headers["Auth-Protocol"], True)
+ return {
+ "Auth-Status": "OK",
+ "Auth-Server": server,
+ "Auth-Port": port
+ }
+ status, code = get_status(protocol, "authentication")
+ return {
+ "Auth-Status": status,
+ "Auth-Error-Code": code,
+ "Auth-Wait": 0
+ }
# Unexpected
return {}
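With this restructuring, an undecodable username/password pair is logged as a warning and the handler falls through to the normal `get_status(protocol, "authentication")` response, so nginx receives an ordinary authentication failure instead of the 500 traceback shown in the issue.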
| {"golden_diff": "diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py\n--- a/core/admin/mailu/internal/nginx.py\n+++ b/core/admin/mailu/internal/nginx.py\n@@ -71,16 +71,6 @@\n }\n # Authenticated user\n elif method == \"plain\":\n- server, port = get_server(headers[\"Auth-Protocol\"], True)\n- # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should\n- # be ASCII and are generally considered ISO8859-1. However when passing\n- # the password, nginx does not transcode the input UTF string, thus\n- # we need to manually decode.\n- raw_user_email = urllib.parse.unquote(headers[\"Auth-User\"])\n- user_email = raw_user_email.encode(\"iso8859-1\").decode(\"utf8\")\n- raw_password = urllib.parse.unquote(headers[\"Auth-Pass\"])\n- password = raw_password.encode(\"iso8859-1\").decode(\"utf8\")\n- ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n service_port = int(urllib.parse.unquote(headers[\"Auth-Port\"]))\n if service_port == 25:\n return {\n@@ -88,20 +78,33 @@\n \"Auth-Error-Code\": \"502 5.5.1\",\n \"Auth-Wait\": 0\n }\n- user = models.User.query.get(user_email)\n- if check_credentials(user, password, ip, protocol):\n- return {\n- \"Auth-Status\": \"OK\",\n- \"Auth-Server\": server,\n- \"Auth-Port\": port\n- }\n+ # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should\n+ # be ASCII and are generally considered ISO8859-1. However when passing\n+ # the password, nginx does not transcode the input UTF string, thus\n+ # we need to manually decode.\n+ raw_user_email = urllib.parse.unquote(headers[\"Auth-User\"])\n+ raw_password = urllib.parse.unquote(headers[\"Auth-Pass\"])\n+ try:\n+ user_email = raw_user_email.encode(\"iso8859-1\").decode(\"utf8\")\n+ password = raw_password.encode(\"iso8859-1\").decode(\"utf8\")\n+ except:\n+ app.logger.warn(f'Received undecodable user/password from nginx: {raw_user_email!r}/{raw_password!r}')\n else:\n- status, code = get_status(protocol, \"authentication\")\n- return {\n- \"Auth-Status\": status,\n- \"Auth-Error-Code\": code,\n- \"Auth-Wait\": 0\n- }\n+ user = models.User.query.get(user_email)\n+ ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n+ if check_credentials(user, password, ip, protocol):\n+ server, port = get_server(headers[\"Auth-Protocol\"], True)\n+ return {\n+ \"Auth-Status\": \"OK\",\n+ \"Auth-Server\": server,\n+ \"Auth-Port\": port\n+ }\n+ status, code = get_status(protocol, \"authentication\")\n+ return {\n+ \"Auth-Status\": status,\n+ \"Auth-Error-Code\": code,\n+ \"Auth-Wait\": 0\n+ }\n # Unexpected\n return {}\n", "issue": "Insufficient input validation in /internal endpoint\nIt seems like it is possible to crash a thread of flask by sending an invalid utf-8 username.\r\nAs far as i can see, this can not be used to gain access (should be validated).\r\n\r\nHere are logs to reproduce this.\r\n\r\n[error] 56#56: *1312970 auth http server 127.0.0.1:8000 did not send server or port while in http auth state, client: xx.xx.xx.xx, server: 0.0.0.0:25, login: \"xxxxxxxxx\\[email protected]\"\r\n\r\nGET /internal/auth/email HTTP/1.0\" 500 290 \"-\" \"-\"\r\n\r\nERROR in app: Exception on /internal/auth/email [GET]\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 2292, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 1815, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 1718, in 
handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/usr/lib/python3.7/site-packages/flask/_compat.py\", line 35, in reraise\r\n raise value\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 1813, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 1799, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/usr/lib/python3.7/site-packages/flask_limiter/extension.py\", line 544, in __inner\r\n return obj(*a, **k)\r\n File \"/app/mailu/internal/views/auth.py\", line 18, in nginx_authentication\r\n headers = nginx.handle_authentication(flask.request.headers)\r\n File \"/app/mailu/internal/nginx.py\", line 45, in handle_authentication\r\n user_email = raw_user_email.encode(\"iso8859-1\").decode(\"utf8\")\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xed in position 6: invalid continuation byte\r\n\r\nxx.xx.xx.xx - - [xx/xx/xxxx:xx:xx:xx +0000] \"GET /internal/auth/email HTTP/1.0\" 500 290 \"-\" \"-\"\r\n\n", "before_files": [{"content": "from mailu import models\nfrom flask import current_app as app\n\nimport re\nimport urllib\nimport ipaddress\nimport socket\nimport tenacity\n\nSUPPORTED_AUTH_METHODS = [\"none\", \"plain\"]\n\n\nSTATUSES = {\n \"authentication\": (\"Authentication credentials invalid\", {\n \"imap\": \"AUTHENTICATIONFAILED\",\n \"smtp\": \"535 5.7.8\",\n \"pop3\": \"-ERR Authentication failed\"\n }),\n \"encryption\": (\"Must issue a STARTTLS command first\", {\n \"smtp\": \"530 5.7.0\"\n }),\n}\n\ndef check_credentials(user, password, ip, protocol=None):\n if not user or not user.enabled or (protocol == \"imap\" and not user.enable_imap) or (protocol == \"pop3\" and not user.enable_pop):\n return False\n is_ok = False\n # webmails\n if len(password) == 64 and ip == app.config['WEBMAIL_ADDRESS']:\n if user.verify_temp_token(password):\n is_ok = True\n # All tokens are 32 characters hex lowercase\n if not is_ok and len(password) == 32:\n for token in user.tokens:\n if (token.check_password(password) and\n (not token.ip or token.ip == ip)):\n is_ok = True\n break\n if not is_ok and user.check_password(password):\n is_ok = True\n return is_ok\n\ndef handle_authentication(headers):\n \"\"\" Handle an HTTP nginx authentication request\n See: http://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol\n \"\"\"\n method = headers[\"Auth-Method\"]\n protocol = headers[\"Auth-Protocol\"]\n # Incoming mail, no authentication\n if method == \"none\" and protocol == \"smtp\":\n server, port = get_server(protocol, False)\n if app.config[\"INBOUND_TLS_ENFORCE\"]:\n if \"Auth-SSL\" in headers and headers[\"Auth-SSL\"] == \"on\":\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n else:\n status, code = get_status(protocol, \"encryption\")\n return {\n \"Auth-Status\": status,\n \"Auth-Error-Code\" : code,\n \"Auth-Wait\": 0\n }\n else:\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n # Authenticated user\n elif method == \"plain\":\n server, port = get_server(headers[\"Auth-Protocol\"], True)\n # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should\n # be ASCII and are generally considered ISO8859-1. 
However when passing\n # the password, nginx does not transcode the input UTF string, thus\n # we need to manually decode.\n raw_user_email = urllib.parse.unquote(headers[\"Auth-User\"])\n user_email = raw_user_email.encode(\"iso8859-1\").decode(\"utf8\")\n raw_password = urllib.parse.unquote(headers[\"Auth-Pass\"])\n password = raw_password.encode(\"iso8859-1\").decode(\"utf8\")\n ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n service_port = int(urllib.parse.unquote(headers[\"Auth-Port\"]))\n if service_port == 25:\n return {\n \"Auth-Status\": \"AUTH not supported\",\n \"Auth-Error-Code\": \"502 5.5.1\",\n \"Auth-Wait\": 0\n }\n user = models.User.query.get(user_email)\n if check_credentials(user, password, ip, protocol):\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n else:\n status, code = get_status(protocol, \"authentication\")\n return {\n \"Auth-Status\": status,\n \"Auth-Error-Code\": code,\n \"Auth-Wait\": 0\n }\n # Unexpected\n return {}\n\n\ndef get_status(protocol, status):\n \"\"\" Return the proper error code depending on the protocol\n \"\"\"\n status, codes = STATUSES[status]\n return status, codes[protocol]\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\ndef get_server(protocol, authenticated=False):\n if protocol == \"imap\":\n hostname, port = extract_host_port(app.config['IMAP_ADDRESS'], 143)\n elif protocol == \"pop3\":\n hostname, port = extract_host_port(app.config['POP3_ADDRESS'], 110)\n elif protocol == \"smtp\":\n if authenticated:\n hostname, port = extract_host_port(app.config['AUTHSMTP_ADDRESS'], 10025)\n else:\n hostname, port = extract_host_port(app.config['SMTP_ADDRESS'], 25)\n try:\n # test if hostname is already resolved to an ip adddress\n ipaddress.ip_address(hostname)\n except:\n # hostname is not an ip address - so we need to resolve it\n hostname = resolve_hostname(hostname)\n return hostname, port\n\[email protected](stop=tenacity.stop_after_attempt(100),\n wait=tenacity.wait_random(min=2, max=5))\ndef resolve_hostname(hostname):\n \"\"\" This function uses system DNS to resolve a hostname.\n It is capable of retrying in case the host is not immediately available\n \"\"\"\n return socket.gethostbyname(hostname)\n", "path": "core/admin/mailu/internal/nginx.py"}]} | 2,695 | 770 |
gh_patches_debug_27115 | rasdani/github-patches | git_diff | nonebot__nonebot2-135 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: temp matchers that have not yet run are removed by mistake
**Describe the problem:**
When handling an event, `temp matcher`s that have never been executed are incorrectly removed.
**Environment information:**
- OS: any
- Python Version: any
- Nonebot Version: 2.0.0a8
</issue>
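In `_check_matcher` below, `_check_expire` returns a matcher whenever `Matcher.temp` is set, so a temporary matcher is removed from `matchers[priority]` even when its rule never matched for the current event. A sketch of the narrower check (an illustration of the intended behaviour, assuming the same `Matcher` attributes as the file below):

```python
from datetime import datetime

async def _check_expire(Matcher):
    # drop a matcher here only when its expire_time has genuinely passed;
    # being temporary is not, by itself, a reason to remove it before it runs
    if Matcher.expire_time and datetime.now() > Matcher.expire_time:
        return Matcher
    return None
```

Temporary matchers would then be removed only after their rule has matched, i.e. from the set of matchers that are about to run.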
<code>
[start of nonebot/message.py]
1 """
2 事件处理
3 ========
4
5 NoneBot 内部处理并按优先级分发事件给所有事件响应器,提供了多个插槽以进行事件的预处理等。
6 """
7
8 import asyncio
9 from datetime import datetime
10 from typing import Set, Type, Optional, Iterable, TYPE_CHECKING
11
12 from nonebot.log import logger
13 from nonebot.rule import TrieRule
14 from nonebot.matcher import matchers, Matcher
15 from nonebot.exception import IgnoredException, StopPropagation, NoLogException
16 from nonebot.typing import T_State, T_EventPreProcessor, T_RunPreProcessor, T_EventPostProcessor, T_RunPostProcessor
17
18 if TYPE_CHECKING:
19 from nonebot.adapters import Bot, Event
20
21 _event_preprocessors: Set[T_EventPreProcessor] = set()
22 _event_postprocessors: Set[T_EventPostProcessor] = set()
23 _run_preprocessors: Set[T_RunPreProcessor] = set()
24 _run_postprocessors: Set[T_RunPostProcessor] = set()
25
26
27 def event_preprocessor(func: T_EventPreProcessor) -> T_EventPreProcessor:
28 """
29 :说明:
30
31 事件预处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之前执行。
32
33 :参数:
34
35 事件预处理函数接收三个参数。
36
37 * ``bot: Bot``: Bot 对象
38 * ``event: Event``: Event 对象
39 * ``state: T_State``: 当前 State
40 """
41 _event_preprocessors.add(func)
42 return func
43
44
45 def event_postprocessor(func: T_EventPostProcessor) -> T_EventPostProcessor:
46 """
47 :说明:
48
49 事件后处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之后执行。
50
51 :参数:
52
53 事件后处理函数接收三个参数。
54
55 * ``bot: Bot``: Bot 对象
56 * ``event: Event``: Event 对象
57 * ``state: T_State``: 当前事件运行前 State
58 """
59 _event_postprocessors.add(func)
60 return func
61
62
63 def run_preprocessor(func: T_RunPreProcessor) -> T_RunPreProcessor:
64 """
65 :说明:
66
67 运行预处理。装饰一个函数,使它在每次事件响应器运行前执行。
68
69 :参数:
70
71 运行预处理函数接收四个参数。
72
73 * ``matcher: Matcher``: 当前要运行的事件响应器
74 * ``bot: Bot``: Bot 对象
75 * ``event: Event``: Event 对象
76 * ``state: T_State``: 当前 State
77 """
78 _run_preprocessors.add(func)
79 return func
80
81
82 def run_postprocessor(func: T_RunPostProcessor) -> T_RunPostProcessor:
83 """
84 :说明:
85
86 运行后处理。装饰一个函数,使它在每次事件响应器运行后执行。
87
88 :参数:
89
90 运行后处理函数接收五个参数。
91
92 * ``matcher: Matcher``: 运行完毕的事件响应器
93 * ``exception: Optional[Exception]``: 事件响应器运行错误(如果存在)
94 * ``bot: Bot``: Bot 对象
95 * ``event: Event``: Event 对象
96 * ``state: T_State``: 当前 State
97 """
98 _run_postprocessors.add(func)
99 return func
100
101
102 async def _check_matcher(priority: int, bot: "Bot", event: "Event",
103 state: T_State) -> Iterable[Type[Matcher]]:
104 current_matchers = matchers[priority].copy()
105
106 async def _check(Matcher: Type[Matcher], bot: "Bot", event: "Event",
107 state: T_State) -> Optional[Type[Matcher]]:
108 try:
109 if (not Matcher.expire_time or datetime.now() <= Matcher.expire_time
110 ) and await Matcher.check_perm(
111 bot, event) and await Matcher.check_rule(bot, event, state):
112 return Matcher
113 except Exception as e:
114 logger.opt(colors=True, exception=e).error(
115 f"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>"
116 )
117 return None
118
119 async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:
120 if Matcher.temp or (Matcher.expire_time and
121 datetime.now() > Matcher.expire_time):
122 return Matcher
123 return None
124
125 checking_tasks = [
126 _check(Matcher, bot, event, state) for Matcher in current_matchers
127 ]
128 checking_expire_tasks = [
129 _check_expire(Matcher) for Matcher in current_matchers
130 ]
131 results = await asyncio.gather(*checking_tasks, return_exceptions=True)
132 expired = await asyncio.gather(*checking_expire_tasks)
133 for expired_matcher in filter(lambda x: x, expired):
134 try:
135 matchers[priority].remove(expired_matcher) # type: ignore
136 except Exception:
137 pass
138 return filter(lambda x: x, results)
139
140
141 async def _run_matcher(Matcher: Type[Matcher], bot: "Bot", event: "Event",
142 state: T_State) -> None:
143 logger.info(f"Event will be handled by {Matcher}")
144
145 matcher = Matcher()
146
147 coros = list(
148 map(lambda x: x(matcher, bot, event, state), _run_preprocessors))
149 if coros:
150 try:
151 await asyncio.gather(*coros)
152 except IgnoredException:
153 logger.opt(colors=True).info(
154 f"Matcher {matcher} running is <b>cancelled</b>")
155 return
156 except Exception as e:
157 logger.opt(colors=True, exception=e).error(
158 "<r><bg #f8bbd0>Error when running RunPreProcessors. "
159 "Running cancelled!</bg #f8bbd0></r>")
160 return
161
162 exception = None
163
164 try:
165 logger.debug(f"Running matcher {matcher}")
166 await matcher.run(bot, event, state)
167 except Exception as e:
168 logger.opt(colors=True, exception=e).error(
169 f"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>"
170 )
171 exception = e
172
173 coros = list(
174 map(lambda x: x(matcher, exception, bot, event, state),
175 _run_postprocessors))
176 if coros:
177 try:
178 await asyncio.gather(*coros)
179 except Exception as e:
180 logger.opt(colors=True, exception=e).error(
181 "<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>"
182 )
183
184 if matcher.block:
185 raise StopPropagation
186 return
187
188
189 async def handle_event(bot: "Bot", event: "Event"):
190 """
191 :说明:
192
193 处理一个事件。调用该函数以实现分发事件。
194
195 :参数:
196
197 * ``bot: Bot``: Bot 对象
198 * ``event: Event``: Event 对象
199
200 :示例:
201
202 .. code-block:: python
203
204 import asyncio
205 asyncio.create_task(handle_event(bot, event))
206 """
207 show_log = True
208 log_msg = f"<m>{bot.type.upper()} {bot.self_id}</m> | "
209 try:
210 log_msg += event.get_log_string()
211 except NoLogException:
212 show_log = False
213 if show_log:
214 logger.opt(colors=True).info(log_msg)
215
216 state = {}
217 coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))
218 if coros:
219 try:
220 logger.debug("Running PreProcessors...")
221 await asyncio.gather(*coros)
222 except IgnoredException:
223 logger.opt(colors=True).info(
224 f"Event {event.get_event_name()} is <b>ignored</b>")
225 return
226 except Exception as e:
227 logger.opt(colors=True, exception=e).error(
228 "<r><bg #f8bbd0>Error when running EventPreProcessors. "
229 "Event ignored!</bg #f8bbd0></r>")
230 return
231
232 # Trie Match
233 _, _ = TrieRule.get_value(bot, event, state)
234
235 break_flag = False
236 for priority in sorted(matchers.keys()):
237 if break_flag:
238 break
239
240 if show_log:
241 logger.debug(f"Checking for matchers in priority {priority}...")
242
243 run_matchers = await _check_matcher(priority, bot, event, state)
244
245 pending_tasks = [
246 _run_matcher(matcher, bot, event, state.copy())
247 for matcher in run_matchers
248 ]
249
250 results = await asyncio.gather(*pending_tasks, return_exceptions=True)
251
252 for result in results:
253 if isinstance(result, StopPropagation):
254 if not break_flag:
255 break_flag = True
256 logger.debug("Stop event propagation")
257
258 coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))
259 if coros:
260 try:
261 logger.debug("Running PostProcessors...")
262 await asyncio.gather(*coros)
263 except Exception as e:
264 logger.opt(colors=True, exception=e).error(
265 "<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>"
266 )
267
[end of nonebot/message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nonebot/message.py b/nonebot/message.py
--- a/nonebot/message.py
+++ b/nonebot/message.py
@@ -117,8 +117,7 @@
return None
async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:
- if Matcher.temp or (Matcher.expire_time and
- datetime.now() > Matcher.expire_time):
+ if Matcher.expire_time and datetime.now() > Matcher.expire_time:
return Matcher
return None
@@ -128,14 +127,19 @@
checking_expire_tasks = [
_check_expire(Matcher) for Matcher in current_matchers
]
- results = await asyncio.gather(*checking_tasks, return_exceptions=True)
+ results = await asyncio.gather(*checking_tasks)
expired = await asyncio.gather(*checking_expire_tasks)
for expired_matcher in filter(lambda x: x, expired):
try:
matchers[priority].remove(expired_matcher) # type: ignore
except Exception:
pass
- return filter(lambda x: x, results)
+ for temp_matcher in filter(lambda x: x and x.temp, results):
+ try:
+ matchers[priority].remove(temp_matcher) # type: ignore
+ except Exception:
+ pass
+ return filter(lambda x: x, results) # type: ignore
async def _run_matcher(Matcher: Type[Matcher], bot: "Bot", event: "Event",
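Besides narrowing `_check_expire` to real expirations, the fix removes temporary matchers only from `results` (the matchers whose rule actually matched and which are about to run), and it drops `return_exceptions=True` from the rule-check `gather`, which is safe because `_check` already catches and logs rule-check errors itself.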
| {"golden_diff": "diff --git a/nonebot/message.py b/nonebot/message.py\n--- a/nonebot/message.py\n+++ b/nonebot/message.py\n@@ -117,8 +117,7 @@\n return None\n \n async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:\n- if Matcher.temp or (Matcher.expire_time and\n- datetime.now() > Matcher.expire_time):\n+ if Matcher.expire_time and datetime.now() > Matcher.expire_time:\n return Matcher\n return None\n \n@@ -128,14 +127,19 @@\n checking_expire_tasks = [\n _check_expire(Matcher) for Matcher in current_matchers\n ]\n- results = await asyncio.gather(*checking_tasks, return_exceptions=True)\n+ results = await asyncio.gather(*checking_tasks)\n expired = await asyncio.gather(*checking_expire_tasks)\n for expired_matcher in filter(lambda x: x, expired):\n try:\n matchers[priority].remove(expired_matcher) # type: ignore\n except Exception:\n pass\n- return filter(lambda x: x, results)\n+ for temp_matcher in filter(lambda x: x and x.temp, results):\n+ try:\n+ matchers[priority].remove(temp_matcher) # type: ignore\n+ except Exception:\n+ pass\n+ return filter(lambda x: x, results) # type: ignore\n \n \n async def _run_matcher(Matcher: Type[Matcher], bot: \"Bot\", event: \"Event\",\n", "issue": "Bug: \u9519\u8bef\u5220\u9664\u672a\u6267\u884c\u8fc7\u7684`temp matcher`\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n\u5904\u7406\u4e8b\u4ef6\u65f6\u9519\u8bef\u5730\u5220\u9664\u4e86\u672a\u6267\u884c\u8fc7\u7684 `temp matcher`\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: any\r\n - Python Version: any\r\n - Nonebot Version: 2.0.0a8\r\n\n", "before_files": [{"content": "\"\"\"\n\u4e8b\u4ef6\u5904\u7406\n========\n\nNoneBot \u5185\u90e8\u5904\u7406\u5e76\u6309\u4f18\u5148\u7ea7\u5206\u53d1\u4e8b\u4ef6\u7ed9\u6240\u6709\u4e8b\u4ef6\u54cd\u5e94\u5668\uff0c\u63d0\u4f9b\u4e86\u591a\u4e2a\u63d2\u69fd\u4ee5\u8fdb\u884c\u4e8b\u4ef6\u7684\u9884\u5904\u7406\u7b49\u3002\n\"\"\"\n\nimport asyncio\nfrom datetime import datetime\nfrom typing import Set, Type, Optional, Iterable, TYPE_CHECKING\n\nfrom nonebot.log import logger\nfrom nonebot.rule import TrieRule\nfrom nonebot.matcher import matchers, Matcher\nfrom nonebot.exception import IgnoredException, StopPropagation, NoLogException\nfrom nonebot.typing import T_State, T_EventPreProcessor, T_RunPreProcessor, T_EventPostProcessor, T_RunPostProcessor\n\nif TYPE_CHECKING:\n from nonebot.adapters import Bot, Event\n\n_event_preprocessors: Set[T_EventPreProcessor] = set()\n_event_postprocessors: Set[T_EventPostProcessor] = set()\n_run_preprocessors: Set[T_RunPreProcessor] = set()\n_run_postprocessors: Set[T_RunPostProcessor] = set()\n\n\ndef event_preprocessor(func: T_EventPreProcessor) -> T_EventPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4e8b\u4ef6\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u524d\u6267\u884c\u3002\n\n :\u53c2\u6570:\n\n \u4e8b\u4ef6\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: T_State``: \u5f53\u524d State\n \"\"\"\n _event_preprocessors.add(func)\n return func\n\n\ndef event_postprocessor(func: T_EventPostProcessor) -> T_EventPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n\n 
\u4e8b\u4ef6\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u540e\u6267\u884c\u3002\n\n :\u53c2\u6570:\n\n \u4e8b\u4ef6\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: T_State``: \u5f53\u524d\u4e8b\u4ef6\u8fd0\u884c\u524d State\n \"\"\"\n _event_postprocessors.add(func)\n return func\n\n\ndef run_preprocessor(func: T_RunPreProcessor) -> T_RunPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n\n \u8fd0\u884c\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u524d\u6267\u884c\u3002\n\n :\u53c2\u6570:\n\n \u8fd0\u884c\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u56db\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u5f53\u524d\u8981\u8fd0\u884c\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: T_State``: \u5f53\u524d State\n \"\"\"\n _run_preprocessors.add(func)\n return func\n\n\ndef run_postprocessor(func: T_RunPostProcessor) -> T_RunPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n\n \u8fd0\u884c\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u540e\u6267\u884c\u3002\n\n :\u53c2\u6570:\n\n \u8fd0\u884c\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e94\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u8fd0\u884c\u5b8c\u6bd5\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``exception: Optional[Exception]``: \u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u9519\u8bef\uff08\u5982\u679c\u5b58\u5728\uff09\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: T_State``: \u5f53\u524d State\n \"\"\"\n _run_postprocessors.add(func)\n return func\n\n\nasync def _check_matcher(priority: int, bot: \"Bot\", event: \"Event\",\n state: T_State) -> Iterable[Type[Matcher]]:\n current_matchers = matchers[priority].copy()\n\n async def _check(Matcher: Type[Matcher], bot: \"Bot\", event: \"Event\",\n state: T_State) -> Optional[Type[Matcher]]:\n try:\n if (not Matcher.expire_time or datetime.now() <= Matcher.expire_time\n ) and await Matcher.check_perm(\n bot, event) and await Matcher.check_rule(bot, event, state):\n return Matcher\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>\"\n )\n return None\n\n async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:\n if Matcher.temp or (Matcher.expire_time and\n datetime.now() > Matcher.expire_time):\n return Matcher\n return None\n\n checking_tasks = [\n _check(Matcher, bot, event, state) for Matcher in current_matchers\n ]\n checking_expire_tasks = [\n _check_expire(Matcher) for Matcher in current_matchers\n ]\n results = await asyncio.gather(*checking_tasks, return_exceptions=True)\n expired = await asyncio.gather(*checking_expire_tasks)\n for expired_matcher in filter(lambda x: x, expired):\n try:\n matchers[priority].remove(expired_matcher) # type: ignore\n except Exception:\n pass\n return filter(lambda x: x, results)\n\n\nasync def _run_matcher(Matcher: Type[Matcher], bot: \"Bot\", event: \"Event\",\n state: T_State) -> None:\n logger.info(f\"Event will be handled by {Matcher}\")\n\n matcher = Matcher()\n\n coros = list(\n map(lambda x: x(matcher, 
bot, event, state), _run_preprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(colors=True).info(\n f\"Matcher {matcher} running is <b>cancelled</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPreProcessors. \"\n \"Running cancelled!</bg #f8bbd0></r>\")\n return\n\n exception = None\n\n try:\n logger.debug(f\"Running matcher {matcher}\")\n await matcher.run(bot, event, state)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>\"\n )\n exception = e\n\n coros = list(\n map(lambda x: x(matcher, exception, bot, event, state),\n _run_postprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>\"\n )\n\n if matcher.block:\n raise StopPropagation\n return\n\n\nasync def handle_event(bot: \"Bot\", event: \"Event\"):\n \"\"\"\n :\u8bf4\u660e:\n\n \u5904\u7406\u4e00\u4e2a\u4e8b\u4ef6\u3002\u8c03\u7528\u8be5\u51fd\u6570\u4ee5\u5b9e\u73b0\u5206\u53d1\u4e8b\u4ef6\u3002\n\n :\u53c2\u6570:\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n\n :\u793a\u4f8b:\n\n .. code-block:: python\n\n import asyncio\n asyncio.create_task(handle_event(bot, event))\n \"\"\"\n show_log = True\n log_msg = f\"<m>{bot.type.upper()} {bot.self_id}</m> | \"\n try:\n log_msg += event.get_log_string()\n except NoLogException:\n show_log = False\n if show_log:\n logger.opt(colors=True).info(log_msg)\n\n state = {}\n coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))\n if coros:\n try:\n logger.debug(\"Running PreProcessors...\")\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(colors=True).info(\n f\"Event {event.get_event_name()} is <b>ignored</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPreProcessors. \"\n \"Event ignored!</bg #f8bbd0></r>\")\n return\n\n # Trie Match\n _, _ = TrieRule.get_value(bot, event, state)\n\n break_flag = False\n for priority in sorted(matchers.keys()):\n if break_flag:\n break\n\n if show_log:\n logger.debug(f\"Checking for matchers in priority {priority}...\")\n\n run_matchers = await _check_matcher(priority, bot, event, state)\n\n pending_tasks = [\n _run_matcher(matcher, bot, event, state.copy())\n for matcher in run_matchers\n ]\n\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n\n for result in results:\n if isinstance(result, StopPropagation):\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n\n coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))\n if coros:\n try:\n logger.debug(\"Running PostProcessors...\")\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>\"\n )\n", "path": "nonebot/message.py"}]} | 3,348 | 340 |
gh_patches_debug_1452 | rasdani/github-patches | git_diff | wagtail__wagtail-11660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wagtail Documentation favicon icon is missing (Not Found)
<!--
Summarise the documentation change you’re suggesting in the Issue title.
-->
### Pertinent section of the Wagtail docs
<!--
Copy the section link here.
-->
https://docs.wagtail.org/en/stable/getting_started/index.html
https://docs.wagtail.org/en/stable/getting_started/tutorial.html
https://docs.wagtail.org/en/stable/reference/index.html
https://docs.wagtail.org/en/stable/reference/pages/index.html
The issue persists in all the pages of documentation.
### Details
<!--
Provide a clear and concise description of what you want to happen.
-->
Wagtail has a nice favicon as per the logo which is displayed fine on this page of the documentation https://docs.wagtail.org/en/stable/
But on all the other pages the favicon is missing and does not show in the browser tab (Chrome or any other browser). When I checked the page source I found the favicon.ico is linked via `<link rel="shortcut icon" href="../../_static/favicon.ico" />`, and this resolves to https://docs.wagtail.org/en/_static/favicon.ico, which is Not Found!
When I checked other resources, for example the CSS or the logo image, I found they are sourced like `src="../_static/img/wagtail-logo-new.svg"`, which resolves to https://docs.wagtail.org/en/stable/_static/img/wagtail-logo-new.svg and is correct.
The difference between the favicon returning a 404 and the logo being available is that the favicon source uses '../../_static', with an extra `../` that needs to be removed.
<img src="https://img001.prntscr.com/file/img001/zEYpfzNSQHqssOSc2_naxg.png" width="500">
<!--
If you're suggesting a very specific change to the documentation, feel free to directly submit a pull request.
-->
### Working on this
<!--
Do you have thoughts on skills needed?
Are you keen to work on this yourself once the issue has been accepted?
Please let us know here.
-->
It's a very minor fix and I already described the issue above. I could fix it but I am not sure exactly where in the documentation this favicon is coming from.
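For whoever picks this up: in a Sphinx-built site the favicon link is normally emitted either by the theme's page templates (here `sphinx-wagtail-theme`, pinned in the `setup.py` listing below) or from the `html_favicon` option in `conf.py`, so the stray `../` most likely has to be fixed in the theme rather than in the page sources. A minimal sketch of the `conf.py` side, with an assumed file path:

```python
# docs/conf.py (illustrative sketch only; the exact file location and value
# used by the Wagtail docs build are assumptions, not taken from this repo).
# Sphinx copies the referenced file into _static/ and the theme templates emit
# the <link rel="shortcut icon"> tag with a page-relative href on every page.
html_favicon = "favicon.ico"
```

If the tag comes from the theme's own templates, the practical fix may simply be upgrading to a theme release that builds the relative path correctly.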
Anyone can contribute to this. View our [contributing guidelines](https://docs.wagtail.org/en/latest/contributing/index.html), add a comment to the issue once you’re ready to start.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from wagtail import __version__
4 from wagtail.utils.setup import assets, check_bdist_egg, sdist
5
6 try:
7 from setuptools import find_packages, setup
8 except ImportError:
9 from distutils.core import setup
10
11
12 # Hack to prevent "TypeError: 'NoneType' object is not callable" error
13 # in multiprocessing/util.py _exit_function when setup.py exits
14 # (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
15 try:
16 import multiprocessing # noqa: F401
17 except ImportError:
18 pass
19
20
21 install_requires = [
22 "Django>=4.2,<6.0",
23 "django-modelcluster>=6.2.1,<7.0",
24 "django-permissionedforms>=0.1,<1.0",
25 "django-taggit>=4.0,<5.1",
26 "django-treebeard>=4.5.1,<5.0",
27 "djangorestframework>=3.11.1,<4.0",
28 "django-filter>=23.3,<24",
29 "draftjs_exporter>=2.1.5,<6.0",
30 "Pillow>=9.1.0,<11.0.0",
31 "beautifulsoup4>=4.8,<4.13",
32 "Willow[heif]>=1.8.0,<2",
33 "requests>=2.11.1,<3.0",
34 "l18n>=2018.5",
35 "openpyxl>=3.0.10,<4.0",
36 "anyascii>=0.1.5",
37 "telepath>=0.3.1,<1",
38 "laces>=0.1,<0.2",
39 ]
40
41 # Testing dependencies
42 testing_extras = [
43 # Required for running the tests
44 "python-dateutil>=2.7",
45 "pytz>=2014.7",
46 "Jinja2>=3.0,<3.2",
47 "boto3>=1.28,<2",
48 "freezegun>=0.3.8",
49 "azure-mgmt-cdn>=12.0,<13.0",
50 "azure-mgmt-frontdoor>=1.0,<1.1",
51 "django-pattern-library>=0.7",
52 # For coverage and PEP8 linting
53 "coverage>=3.7.0",
54 "doc8==0.8.1",
55 "ruff==0.1.5",
56 # For enforcing string formatting mechanism in source files
57 "semgrep==1.40.0",
58 # For templates linting
59 "curlylint==0.13.1",
60 # For template indenting
61 "djhtml==3.0.6",
62 # For validating string formats in .po translation files
63 "polib>=1.1,<2.0",
64 # For wagtail.test.utils.wagtail_factories (used for streamfield migration toolkit)
65 "factory-boy>=3.2",
66 # For running tests in parallel
67 "tblib>=2.0,<3.0",
68 ]
69
70 # Documentation dependencies
71 documentation_extras = [
72 "pyenchant>=3.1.1,<4",
73 "sphinxcontrib-spelling>=7,<8",
74 "Sphinx>=1.5.2",
75 "sphinx-autobuild>=0.6.0",
76 "sphinx-wagtail-theme==6.2.0",
77 "myst_parser==2.0.0",
78 "sphinx_copybutton>=0.5,<1.0",
79 ]
80
81 setup(
82 name="wagtail",
83 version=__version__,
84 description="A Django content management system.",
85 author="Wagtail core team + contributors",
86 author_email="[email protected]", # For support queries, please see https://docs.wagtail.org/en/stable/support.html
87 url="https://wagtail.org/",
88 project_urls={
89 "Changelog": "https://github.com/wagtail/wagtail/blob/main/CHANGELOG.txt",
90 "Documentation": "https://docs.wagtail.org",
91 "Source": "https://github.com/wagtail/wagtail",
92 "Tracker": "https://github.com/wagtail/wagtail/issues",
93 },
94 packages=find_packages(),
95 include_package_data=True,
96 license="BSD",
97 long_description="Wagtail is an open source content management \
98 system built on Django, with a strong community and commercial support. \
99 It’s focused on user experience, and offers precise control for \
100 designers and developers.\n\n\
101 For more details, see https://wagtail.org, https://docs.wagtail.org and \
102 https://github.com/wagtail/wagtail/.",
103 classifiers=[
104 "Development Status :: 5 - Production/Stable",
105 "Environment :: Web Environment",
106 "Intended Audience :: Developers",
107 "License :: OSI Approved :: BSD License",
108 "Operating System :: OS Independent",
109 "Programming Language :: Python",
110 "Programming Language :: Python :: 3",
111 "Programming Language :: Python :: 3.8",
112 "Programming Language :: Python :: 3.9",
113 "Programming Language :: Python :: 3.10",
114 "Programming Language :: Python :: 3.11",
115 "Programming Language :: Python :: 3.12",
116 "Framework :: Django",
117 "Framework :: Django :: 4.2",
118 "Framework :: Django :: 5.0",
119 "Framework :: Wagtail",
120 "Topic :: Internet :: WWW/HTTP :: Site Management",
121 ],
122 python_requires=">=3.8",
123 install_requires=install_requires,
124 extras_require={"testing": testing_extras, "docs": documentation_extras},
125 entry_points="""
126 [console_scripts]
127 wagtail=wagtail.bin.wagtail:main
128 """,
129 zip_safe=False,
130 cmdclass={
131 "sdist": sdist,
132 "bdist_egg": check_bdist_egg,
133 "assets": assets,
134 },
135 )
136
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -73,7 +73,7 @@
"sphinxcontrib-spelling>=7,<8",
"Sphinx>=1.5.2",
"sphinx-autobuild>=0.6.0",
- "sphinx-wagtail-theme==6.2.0",
+ "sphinx-wagtail-theme==6.3.0",
"myst_parser==2.0.0",
"sphinx_copybutton>=0.5,<1.0",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -73,7 +73,7 @@\n \"sphinxcontrib-spelling>=7,<8\",\n \"Sphinx>=1.5.2\",\n \"sphinx-autobuild>=0.6.0\",\n- \"sphinx-wagtail-theme==6.2.0\",\n+ \"sphinx-wagtail-theme==6.3.0\",\n \"myst_parser==2.0.0\",\n \"sphinx_copybutton>=0.5,<1.0\",\n ]\n", "issue": "Wagtail Documentation favicon icon is missing (Not Found)\n<!--\r\n Summarise the documentation change you\u2019re suggesting in the Issue title.\r\n-->\r\n\r\n### Pertinent section of the Wagtail docs\r\n\r\n<!--\r\n Copy the section link here.\r\n-->\r\nhttps://docs.wagtail.org/en/stable/getting_started/index.html\r\nhttps://docs.wagtail.org/en/stable/getting_started/tutorial.html\r\nhttps://docs.wagtail.org/en/stable/reference/index.html\r\nhttps://docs.wagtail.org/en/stable/reference/pages/index.html\r\n\r\nThe issue persists in all the pages of documentation. \r\n\r\n### Details\r\n\r\n<!--\r\n Provide a clear and concise description of what you want to happen.\r\n-->\r\n\r\nWagtail has a nice favicon as per the logo which is displayed fine on this page of the documentation https://docs.wagtail.org/en/stable/\r\nBut on all the other pages the favicon is missing and not showing on the tab of chrome or any other browser tabs. When I checked the page source I found the favicon.ico is linked via `<link rel=\"shortcut icon\" href=\"../../_static/favicon.ico\" />` and this is going to https://docs.wagtail.org/en/_static/favicon.ico which is Not Found! \r\nWhen I checked other sources for example CSS or logo image I found that is sourced like `src=\"../_static/img/wagtail-logo-new.svg` and takes to https://docs.wagtail.org/en/stable/_static/img/wagtail-logo-new.svg which is correct. \r\n\r\nThe difference between the favicon going 404 and the logo being available is that the favicon icon source is '../../_static' with an extra `../` which needs to be removed. \r\n\r\n<img src=\"https://img001.prntscr.com/file/img001/zEYpfzNSQHqssOSc2_naxg.png\" width=\"500\">\r\n\r\n<!--\r\n If you're suggesting a very specific change to the documentation, feel free to directly submit a pull request.\r\n-->\r\n\r\n### Working on this\r\n\r\n<!--\r\n Do you have thoughts on skills needed?\r\n Are you keen to work on this yourself once the issue has been accepted?\r\n Please let us know here.\r\n-->\r\nIt's a very minor fix and I already described the issue above. I could fix it but I am not sure exactly where in the documentation this favicon is coming from.\r\n\r\nAnyone can contribute to this. 
View our [contributing guidelines](https://docs.wagtail.org/en/latest/contributing/index.html), add a comment to the issue once you\u2019re ready to start.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom wagtail import __version__\nfrom wagtail.utils.setup import assets, check_bdist_egg, sdist\n\ntry:\n from setuptools import find_packages, setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa: F401\nexcept ImportError:\n pass\n\n\ninstall_requires = [\n \"Django>=4.2,<6.0\",\n \"django-modelcluster>=6.2.1,<7.0\",\n \"django-permissionedforms>=0.1,<1.0\",\n \"django-taggit>=4.0,<5.1\",\n \"django-treebeard>=4.5.1,<5.0\",\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=23.3,<24\",\n \"draftjs_exporter>=2.1.5,<6.0\",\n \"Pillow>=9.1.0,<11.0.0\",\n \"beautifulsoup4>=4.8,<4.13\",\n \"Willow[heif]>=1.8.0,<2\",\n \"requests>=2.11.1,<3.0\",\n \"l18n>=2018.5\",\n \"openpyxl>=3.0.10,<4.0\",\n \"anyascii>=0.1.5\",\n \"telepath>=0.3.1,<1\",\n \"laces>=0.1,<0.2\",\n]\n\n# Testing dependencies\ntesting_extras = [\n # Required for running the tests\n \"python-dateutil>=2.7\",\n \"pytz>=2014.7\",\n \"Jinja2>=3.0,<3.2\",\n \"boto3>=1.28,<2\",\n \"freezegun>=0.3.8\",\n \"azure-mgmt-cdn>=12.0,<13.0\",\n \"azure-mgmt-frontdoor>=1.0,<1.1\",\n \"django-pattern-library>=0.7\",\n # For coverage and PEP8 linting\n \"coverage>=3.7.0\",\n \"doc8==0.8.1\",\n \"ruff==0.1.5\",\n # For enforcing string formatting mechanism in source files\n \"semgrep==1.40.0\",\n # For templates linting\n \"curlylint==0.13.1\",\n # For template indenting\n \"djhtml==3.0.6\",\n # For validating string formats in .po translation files\n \"polib>=1.1,<2.0\",\n # For wagtail.test.utils.wagtail_factories (used for streamfield migration toolkit)\n \"factory-boy>=3.2\",\n # For running tests in parallel\n \"tblib>=2.0,<3.0\",\n]\n\n# Documentation dependencies\ndocumentation_extras = [\n \"pyenchant>=3.1.1,<4\",\n \"sphinxcontrib-spelling>=7,<8\",\n \"Sphinx>=1.5.2\",\n \"sphinx-autobuild>=0.6.0\",\n \"sphinx-wagtail-theme==6.2.0\",\n \"myst_parser==2.0.0\",\n \"sphinx_copybutton>=0.5,<1.0\",\n]\n\nsetup(\n name=\"wagtail\",\n version=__version__,\n description=\"A Django content management system.\",\n author=\"Wagtail core team + contributors\",\n author_email=\"[email protected]\", # For support queries, please see https://docs.wagtail.org/en/stable/support.html\n url=\"https://wagtail.org/\",\n project_urls={\n \"Changelog\": \"https://github.com/wagtail/wagtail/blob/main/CHANGELOG.txt\",\n \"Documentation\": \"https://docs.wagtail.org\",\n \"Source\": \"https://github.com/wagtail/wagtail\",\n \"Tracker\": \"https://github.com/wagtail/wagtail/issues\",\n },\n packages=find_packages(),\n include_package_data=True,\n license=\"BSD\",\n long_description=\"Wagtail is an open source content management \\\nsystem built on Django, with a strong community and commercial support. 
\\\nIt\u2019s focused on user experience, and offers precise control for \\\ndesigners and developers.\\n\\n\\\nFor more details, see https://wagtail.org, https://docs.wagtail.org and \\\nhttps://github.com/wagtail/wagtail/.\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Framework :: Django\",\n \"Framework :: Django :: 4.2\",\n \"Framework :: Django :: 5.0\",\n \"Framework :: Wagtail\",\n \"Topic :: Internet :: WWW/HTTP :: Site Management\",\n ],\n python_requires=\">=3.8\",\n install_requires=install_requires,\n extras_require={\"testing\": testing_extras, \"docs\": documentation_extras},\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n cmdclass={\n \"sdist\": sdist,\n \"bdist_egg\": check_bdist_egg,\n \"assets\": assets,\n },\n)\n", "path": "setup.py"}]} | 2,697 | 129 |
gh_patches_debug_2792 | rasdani/github-patches | git_diff | docker__docker-py-3257 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Breaks with requests 2.32.0: Not supported URL scheme http+docker
With requests 2.32.0 (released about an hour ago as I write this), the docker library as called by [tox-docker](https://github.com/tox-dev/tox-docker) fails with the following exception:
```
Traceback (most recent call last):
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py", line 532, in send
conn = self._get_connection(request, verify, proxies=proxies, cert=cert)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py", line 400, in _get_connection
conn = self.poolmanager.connection_from_host(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/urllib3/poolmanager.py", line 304, in connection_from_host
return self.connection_from_context(request_context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/urllib3/poolmanager.py", line 326, in connection_from_context
raise URLSchemeUnknown(scheme)
urllib3.exceptions.URLSchemeUnknown: Not supported URL scheme http+docker
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py", line 214, in _retrieve_server_version
return self.version(api_version=False)["ApiVersion"]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/daemon.py", line 181, in version
return self._result(self._get(url), json=True)
^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/utils/decorators.py", line 46, in inner
return f(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py", line 237, in _get
return self.get(url, **self._set_request_timeout(kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py", line 602, in get
return self.request("GET", url, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py", line 589, in request
resp = self.send(prep, **send_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py", line 703, in send
r = adapter.send(request, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py", line 534, in send
raise InvalidURL(e, request=request)
requests.exceptions.InvalidURL: Not supported URL scheme http+docker
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/session/cmd/run/single.py", line 48, in _evaluate
code, outcomes = run_commands(tox_env, no_test)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/session/cmd/run/single.py", line 79, in run_commands
MANAGER.tox_before_run_commands(tox_env)
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/plugin/manager.py", line 88, in tox_before_run_commands
self.manager.hook.tox_before_run_commands(tox_env=tox_env)
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_hooks.py", line 513, in __call__
return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_manager.py", line 120, in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_callers.py", line 139, in _multicall
raise exception.with_traceback(exception.__traceback__)
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_callers.py", line 103, in _multicall
res = hook_impl.function(*args)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/tox4/plugin.py", line 73, in tox_before_run_commands
docker_build_or_pull(container_config, log)
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/plugin.py", line 57, in docker_build_or_pull
docker_pull(container_config, log)
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/plugin.py", line 65, in docker_pull
docker = docker_module.from_env(version="auto")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/client.py", line 96, in from_env
return cls(
^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/client.py", line 45, in __init__
self.api = APIClient(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py", line 197, in __init__
self._version = self._retrieve_server_version()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py", line 221, in _retrieve_server_version
raise DockerException(
docker.errors.DockerException: Error while fetching server API version: Not supported URL scheme http+docker
```
Reverting to requests 2.31.0 without any other changes fixes the problem.
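For context on the failure mode: the traceback above shows that requests 2.32 now builds the connection through `HTTPAdapter._get_connection()` and urllib3's pool manager, which rejects the custom `http+docker` scheme instead of going through the `get_connection()` method that docker-py's transport adapters override. A sketch of the adapter-level override that restores the old path, relying on `get_connection_with_tls_context`, the hook requests exposes from 2.32.2 onwards (this mirrors the fix later applied to docker-py's `BaseHTTPAdapter`):

```python
# Sketch only: route requests >= 2.32.2 back through the scheme-aware
# get_connection() that the docker transport adapters implement, bypassing
# the urllib3 poolmanager lookup that raises URLSchemeUnknown("http+docker").
import requests.adapters


class PatchedBaseHTTPAdapter(requests.adapters.HTTPAdapter):
    def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
        return self.get_connection(request.url, proxies)
```

Until such a release is available, pinning `requests<2.32` (i.e. reverting as noted above) is the workaround.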
</issue>
<code>
[start of docker/transport/basehttpadapter.py]
1 import requests.adapters
2
3
4 class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
5 def close(self):
6 super().close()
7 if hasattr(self, 'pools'):
8 self.pools.clear()
9
[end of docker/transport/basehttpadapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py
--- a/docker/transport/basehttpadapter.py
+++ b/docker/transport/basehttpadapter.py
@@ -6,3 +6,8 @@
super().close()
if hasattr(self, 'pools'):
self.pools.clear()
+
+ # Fix for requests 2.32.2+:
+ # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
+ def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
+ return self.get_connection(request.url, proxies)
| {"golden_diff": "diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py\n--- a/docker/transport/basehttpadapter.py\n+++ b/docker/transport/basehttpadapter.py\n@@ -6,3 +6,8 @@\n super().close()\n if hasattr(self, 'pools'):\n self.pools.clear()\n+\n+ # Fix for requests 2.32.2+:\n+ # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05\n+ def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):\n+ return self.get_connection(request.url, proxies)\n", "issue": "Breaks with requests 2.32.0: Not supported URL scheme http+docker\nWith requests 2.32.0 (released about an hour ago as I write this), the docker library as called by [tox-docker](https://github.com/tox-dev/tox-docker) fails with the following exception:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py\", line 532, in send\r\n conn = self._get_connection(request, verify, proxies=proxies, cert=cert)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py\", line 400, in _get_connection\r\n conn = self.poolmanager.connection_from_host(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/urllib3/poolmanager.py\", line 304, in connection_from_host\r\n return self.connection_from_context(request_context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/urllib3/poolmanager.py\", line 326, in connection_from_context\r\n raise URLSchemeUnknown(scheme)\r\nurllib3.exceptions.URLSchemeUnknown: Not supported URL scheme http+docker\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py\", line 214, in _retrieve_server_version\r\n return self.version(api_version=False)[\"ApiVersion\"]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/daemon.py\", line 181, in version\r\n return self._result(self._get(url), json=True)\r\n ^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/utils/decorators.py\", line 46, in inner\r\n return f(self, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py\", line 237, in _get\r\n return self.get(url, **self._set_request_timeout(kwargs))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py\", line 602, in get\r\n return self.request(\"GET\", url, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py\", line 589, in request\r\n resp = self.send(prep, **send_kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py\", line 703, in send\r\n r = adapter.send(request, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py\", line 534, in send\r\n raise InvalidURL(e, request=request)\r\nrequests.exceptions.InvalidURL: 
Not supported URL scheme http+docker\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/session/cmd/run/single.py\", line 48, in _evaluate\r\n code, outcomes = run_commands(tox_env, no_test)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/session/cmd/run/single.py\", line 79, in run_commands\r\n MANAGER.tox_before_run_commands(tox_env)\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/plugin/manager.py\", line 88, in tox_before_run_commands\r\n self.manager.hook.tox_before_run_commands(tox_env=tox_env)\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_hooks.py\", line 513, in __call__\r\n return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_manager.py\", line 120, in _hookexec\r\n return self._inner_hookexec(hook_name, methods, kwargs, firstresult)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_callers.py\", line 139, in _multicall\r\n raise exception.with_traceback(exception.__traceback__)\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_callers.py\", line 103, in _multicall\r\n res = hook_impl.function(*args)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/tox4/plugin.py\", line 73, in tox_before_run_commands\r\n docker_build_or_pull(container_config, log)\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/plugin.py\", line 57, in docker_build_or_pull\r\n docker_pull(container_config, log)\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/plugin.py\", line 65, in docker_pull\r\n docker = docker_module.from_env(version=\"auto\")\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/client.py\", line 96, in from_env\r\n return cls(\r\n ^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/client.py\", line 45, in __init__\r\n self.api = APIClient(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py\", line 197, in __init__\r\n self._version = self._retrieve_server_version()\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py\", line 221, in _retrieve_server_version\r\n raise DockerException(\r\ndocker.errors.DockerException: Error while fetching server API version: Not supported URL scheme http+docker\r\n```\r\n\r\nReverting to requests 2.31.0 without any other changes fixes the problem.\n", "before_files": [{"content": "import requests.adapters\n\n\nclass BaseHTTPAdapter(requests.adapters.HTTPAdapter):\n def close(self):\n super().close()\n if hasattr(self, 'pools'):\n self.pools.clear()\n", "path": "docker/transport/basehttpadapter.py"}]} | 2,358 | 170 |
gh_patches_debug_32124 | rasdani/github-patches | git_diff | translate__pootle-5451 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove SubfieldBase from MultiStringField
In Django 1.9 SubfieldBase is deprecated, and it is removed in 1.10
Related Stackoverflow - http://stackoverflow.com/questions/35166085/how-to-deal-with-subfieldbase-has-been-deprecated-use-field-from-db-value-inst
https://docs.djangoproject.com/en/1.9/ref/models/fields/#field-api-reference
As far as I can tell we can just safely remove it - it seems the field only needs explicit to/from-db conversion methods, as sketched below
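A minimal sketch of the usual replacement pattern, using the standard Django 1.8+ field API; `to_python()`/`to_db()` here are the module-level helpers that already exist in `pootle_store/fields.py`, and the `context` argument of `from_db_value()` applies to Django < 2.0:

```python
from django.db import models


class MultiStringField(models.Field):
    description = "a field imitating translate.misc.multistring used for plurals"

    def get_internal_type(self):
        return "TextField"

    def to_python(self, value):
        # Still used by forms and deserialization; must accept already-converted values.
        return to_python(value)

    def from_db_value(self, value, expression, connection, context):
        # Replaces the database-load conversion that SubfieldBase used to trigger.
        return to_python(value)

    def get_prep_value(self, value):
        return to_db(value)
```

If assignment-time casting is still wanted (the other behaviour `SubfieldBase` provided), a small descriptor attached in `contribute_to_class()` can restore it.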
</issue>
<code>
[start of pootle/apps/pootle_store/fields.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 """Fields required for handling translation files"""
10
11 import logging
12 import os
13
14 from translate.misc.multistring import multistring
15
16 from django.db import models
17 from django.db.models.fields.files import FieldFile, FileField
18
19 from pootle.core.utils.multistring import (parse_multistring,
20 unparse_multistring)
21
22
23 # # # # # # # # # String # # # # # # # # # # # # # # #
24
25
26 def to_db(value):
27 """Flatten the given value (string, list of plurals or multistring) into
28 the database string representation.
29 """
30 if value is None:
31 return None
32
33 return unparse_multistring(value)
34
35
36 def to_python(value):
37 """Reconstruct a multistring from the database string representation."""
38 if not value:
39 return multistring("", encoding="UTF-8")
40 elif isinstance(value, multistring):
41 return value
42 elif isinstance(value, basestring):
43 return parse_multistring(value)
44 elif isinstance(value, dict):
45 return multistring([val for __, val in sorted(value.items())],
46 encoding="UTF-8")
47 else:
48 return multistring(value, encoding="UTF-8")
49
50
51 class MultiStringField(models.Field):
52 description = \
53 "a field imitating translate.misc.multistring used for plurals"
54 __metaclass__ = models.SubfieldBase
55
56 def __init__(self, *args, **kwargs):
57 super(MultiStringField, self).__init__(*args, **kwargs)
58
59 def get_internal_type(self):
60 return "TextField"
61
62 def to_python(self, value):
63 return to_python(value)
64
65 def get_prep_value(self, value):
66 return to_db(value)
67
68 def get_prep_lookup(self, lookup_type, value):
69 if (lookup_type in ('exact', 'iexact') or
70 not isinstance(value, basestring)):
71 value = self.get_prep_value(value)
72 return super(MultiStringField, self).get_prep_lookup(lookup_type,
73 value)
74
75
76 # # # # # # # # # File # # # # # # # # # # # # # # # #
77
78
79 class StoreTuple(object):
80 """Encapsulates toolkit stores in the in memory cache, needed
81 since LRUCachingDict is based on a weakref.WeakValueDictionary
82 which cannot reference normal tuples
83 """
84
85 def __init__(self, store, mod_info, realpath):
86 self.store = store
87 self.mod_info = mod_info
88 self.realpath = realpath
89
90
91 class TranslationStoreFieldFile(FieldFile):
92 """FieldFile is the file-like object of a FileField, that is found in a
93 TranslationStoreField.
94 """
95
96 from translate.misc.lru import LRUCachingDict
97 from django.conf import settings
98
99 _store_cache = LRUCachingDict(settings.PARSE_POOL_SIZE,
100 settings.PARSE_POOL_CULL_FREQUENCY)
101
102 def getpomtime(self):
103 file_stat = os.stat(self.realpath)
104 return file_stat.st_mtime, file_stat.st_size
105
106 @property
107 def filename(self):
108 return os.path.basename(self.name)
109
110 def _get_realpath(self):
111 """Return realpath resolving symlinks if necessary."""
112 if not hasattr(self, "_realpath"):
113 # Django's db.models.fields.files.FieldFile raises ValueError if
114 # if the file field has no name - and tests "if self" to check
115 if self:
116 self._realpath = os.path.realpath(self.path)
117 else:
118 self._realpath = ''
119 return self._realpath
120
121 @property
122 def realpath(self):
123 """Get real path from cache before attempting to check for symlinks."""
124 if not hasattr(self, "_store_tuple"):
125 return self._get_realpath()
126 else:
127 return self._store_tuple.realpath
128
129 @property
130 def store(self):
131 """Get translation store from dictionary cache, populate if store not
132 already cached.
133 """
134 self._update_store_cache()
135 return self._store_tuple.store
136
137 def _update_store_cache(self):
138 """Add translation store to dictionary cache, replace old cached
139 version if needed.
140 """
141 if self.exists():
142 mod_info = self.getpomtime()
143 else:
144 mod_info = 0
145 if (not hasattr(self, "_store_tuple") or
146 self._store_tuple.mod_info != mod_info):
147 try:
148 self._store_tuple = self._store_cache[self.path]
149 if self._store_tuple.mod_info != mod_info:
150 # if file is modified act as if it doesn't exist in cache
151 raise KeyError
152 except KeyError:
153 logging.debug(u"Cache miss for %s", self.path)
154 from translate.storage import factory
155
156 fileclass = self.instance.syncer.file_class
157 classes = {
158 str(self.instance.filetype.extension): fileclass,
159 str(self.instance.filetype.template_extension): fileclass}
160 store_obj = factory.getobject(self.path,
161 ignore=self.field.ignore,
162 classes=classes)
163 self._store_tuple = StoreTuple(store_obj, mod_info,
164 self.realpath)
165 self._store_cache[self.path] = self._store_tuple
166
167 def _touch_store_cache(self):
168 """Update stored mod_info without reparsing file."""
169 if hasattr(self, "_store_tuple"):
170 mod_info = self.getpomtime()
171 if self._store_tuple.mod_info != mod_info:
172 self._store_tuple.mod_info = mod_info
173 else:
174 # FIXME: do we really need that?
175 self._update_store_cache()
176
177 def _delete_store_cache(self):
178 """Remove translation store from cache."""
179 try:
180 del self._store_cache[self.path]
181 except KeyError:
182 pass
183
184 try:
185 del self._store_tuple
186 except AttributeError:
187 pass
188
189 def exists(self):
190 return os.path.exists(self.realpath)
191
192 def savestore(self):
193 """Saves to temporary file then moves over original file. This way we
194 avoid the need for locking.
195 """
196 import shutil
197 from pootle.core.utils import ptempfile as tempfile
198 tmpfile, tmpfilename = tempfile.mkstemp(suffix=self.filename)
199 os.close(tmpfile)
200 self.store.savefile(tmpfilename)
201 shutil.move(tmpfilename, self.realpath)
202 self._touch_store_cache()
203
204 def save(self, name, content, save=True):
205 # FIXME: implement save to tmp file then move instead of directly
206 # saving
207 super(TranslationStoreFieldFile, self).save(name, content, save)
208 self._delete_store_cache()
209
210 def delete(self, save=True):
211 self._delete_store_cache()
212 if save:
213 super(TranslationStoreFieldFile, self).delete(save)
214
215
216 class TranslationStoreField(FileField):
217 """This is the field class to represent a FileField in a model that
218 represents a translation store.
219 """
220
221 attr_class = TranslationStoreFieldFile
222
223 def __init__(self, ignore=None, **kwargs):
224 """ignore: postfix to be stripped from filename when trying to
225 determine file format for parsing, useful for .pending files
226 """
227 self.ignore = ignore
228 super(TranslationStoreField, self).__init__(**kwargs)
229
230 def deconstruct(self):
231 name, path, args, kwargs = super(TranslationStoreField,
232 self).deconstruct()
233 if self.ignore is not None:
234 kwargs['ignore'] = self.ignore
235 return name, path, args, kwargs
236
[end of pootle/apps/pootle_store/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_store/fields.py b/pootle/apps/pootle_store/fields.py
--- a/pootle/apps/pootle_store/fields.py
+++ b/pootle/apps/pootle_store/fields.py
@@ -48,10 +48,30 @@
return multistring(value, encoding="UTF-8")
+class CastOnAssignDescriptor(object):
+ """
+ A property descriptor which ensures that `field.to_python()` is called on
+ _every_ assignment to the field. This used to be provided by the
+ `django.db.models.subclassing.Creator` class, which in turn was used by the
+ deprecated-in-Django-1.10 `SubfieldBase` class, hence the reimplementation
+ here.
+ """
+
+ def __init__(self, field):
+ self.field = field
+
+ def __get__(self, obj, type=None):
+ if obj is None:
+ return self
+ return obj.__dict__[self.field.name]
+
+ def __set__(self, obj, value):
+ obj.__dict__[self.field.name] = self.field.to_python(value)
+
+
class MultiStringField(models.Field):
description = \
"a field imitating translate.misc.multistring used for plurals"
- __metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
super(MultiStringField, self).__init__(*args, **kwargs)
@@ -62,6 +82,9 @@
def to_python(self, value):
return to_python(value)
+ def from_db_value(self, value, expression, connection, context):
+ return to_python(value)
+
def get_prep_value(self, value):
return to_db(value)
@@ -72,6 +95,10 @@
return super(MultiStringField, self).get_prep_lookup(lookup_type,
value)
+ def contribute_to_class(self, cls, name):
+ super(MultiStringField, self).contribute_to_class(cls, name)
+ setattr(cls, name, CastOnAssignDescriptor(self))
+
# # # # # # # # # File # # # # # # # # # # # # # # # #
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/fields.py b/pootle/apps/pootle_store/fields.py\n--- a/pootle/apps/pootle_store/fields.py\n+++ b/pootle/apps/pootle_store/fields.py\n@@ -48,10 +48,30 @@\n return multistring(value, encoding=\"UTF-8\")\n \n \n+class CastOnAssignDescriptor(object):\n+ \"\"\"\n+ A property descriptor which ensures that `field.to_python()` is called on\n+ _every_ assignment to the field. This used to be provided by the\n+ `django.db.models.subclassing.Creator` class, which in turn was used by the\n+ deprecated-in-Django-1.10 `SubfieldBase` class, hence the reimplementation\n+ here.\n+ \"\"\"\n+\n+ def __init__(self, field):\n+ self.field = field\n+\n+ def __get__(self, obj, type=None):\n+ if obj is None:\n+ return self\n+ return obj.__dict__[self.field.name]\n+\n+ def __set__(self, obj, value):\n+ obj.__dict__[self.field.name] = self.field.to_python(value)\n+\n+\n class MultiStringField(models.Field):\n description = \\\n \"a field imitating translate.misc.multistring used for plurals\"\n- __metaclass__ = models.SubfieldBase\n \n def __init__(self, *args, **kwargs):\n super(MultiStringField, self).__init__(*args, **kwargs)\n@@ -62,6 +82,9 @@\n def to_python(self, value):\n return to_python(value)\n \n+ def from_db_value(self, value, expression, connection, context):\n+ return to_python(value)\n+\n def get_prep_value(self, value):\n return to_db(value)\n \n@@ -72,6 +95,10 @@\n return super(MultiStringField, self).get_prep_lookup(lookup_type,\n value)\n \n+ def contribute_to_class(self, cls, name):\n+ super(MultiStringField, self).contribute_to_class(cls, name)\n+ setattr(cls, name, CastOnAssignDescriptor(self))\n+\n \n # # # # # # # # # File # # # # # # # # # # # # # # # #\n", "issue": "Remove SubfieldBase from MultiStringField\nIn django 1.9 SubFieldBase is deprecated, and removed in 1.10\n\nRelated Stackoverflow - http://stackoverflow.com/questions/35166085/how-to-deal-with-subfieldbase-has-been-deprecated-use-field-from-db-value-inst\n\nhttps://docs.djangoproject.com/en/1.9/ref/models/fields/#field-api-reference\n\nafaict we can just safely remove - it seems it just needs to have to/from db methods\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\"\"\"Fields required for handling translation files\"\"\"\n\nimport logging\nimport os\n\nfrom translate.misc.multistring import multistring\n\nfrom django.db import models\nfrom django.db.models.fields.files import FieldFile, FileField\n\nfrom pootle.core.utils.multistring import (parse_multistring,\n unparse_multistring)\n\n\n# # # # # # # # # String # # # # # # # # # # # # # # #\n\n\ndef to_db(value):\n \"\"\"Flatten the given value (string, list of plurals or multistring) into\n the database string representation.\n \"\"\"\n if value is None:\n return None\n\n return unparse_multistring(value)\n\n\ndef to_python(value):\n \"\"\"Reconstruct a multistring from the database string representation.\"\"\"\n if not value:\n return multistring(\"\", encoding=\"UTF-8\")\n elif isinstance(value, multistring):\n return value\n elif isinstance(value, basestring):\n return parse_multistring(value)\n elif isinstance(value, dict):\n return multistring([val for __, val in sorted(value.items())],\n encoding=\"UTF-8\")\n else:\n return multistring(value, encoding=\"UTF-8\")\n\n\nclass MultiStringField(models.Field):\n description = \\\n \"a field imitating translate.misc.multistring used for plurals\"\n __metaclass__ = models.SubfieldBase\n\n def __init__(self, *args, **kwargs):\n super(MultiStringField, self).__init__(*args, **kwargs)\n\n def get_internal_type(self):\n return \"TextField\"\n\n def to_python(self, value):\n return to_python(value)\n\n def get_prep_value(self, value):\n return to_db(value)\n\n def get_prep_lookup(self, lookup_type, value):\n if (lookup_type in ('exact', 'iexact') or\n not isinstance(value, basestring)):\n value = self.get_prep_value(value)\n return super(MultiStringField, self).get_prep_lookup(lookup_type,\n value)\n\n\n# # # # # # # # # File # # # # # # # # # # # # # # # #\n\n\nclass StoreTuple(object):\n \"\"\"Encapsulates toolkit stores in the in memory cache, needed\n since LRUCachingDict is based on a weakref.WeakValueDictionary\n which cannot reference normal tuples\n \"\"\"\n\n def __init__(self, store, mod_info, realpath):\n self.store = store\n self.mod_info = mod_info\n self.realpath = realpath\n\n\nclass TranslationStoreFieldFile(FieldFile):\n \"\"\"FieldFile is the file-like object of a FileField, that is found in a\n TranslationStoreField.\n \"\"\"\n\n from translate.misc.lru import LRUCachingDict\n from django.conf import settings\n\n _store_cache = LRUCachingDict(settings.PARSE_POOL_SIZE,\n settings.PARSE_POOL_CULL_FREQUENCY)\n\n def getpomtime(self):\n file_stat = os.stat(self.realpath)\n return file_stat.st_mtime, file_stat.st_size\n\n @property\n def filename(self):\n return os.path.basename(self.name)\n\n def _get_realpath(self):\n \"\"\"Return realpath resolving symlinks if necessary.\"\"\"\n if not hasattr(self, \"_realpath\"):\n # Django's db.models.fields.files.FieldFile raises ValueError if\n # if the file field has no name - and tests \"if self\" to check\n if self:\n self._realpath = os.path.realpath(self.path)\n else:\n self._realpath = ''\n return self._realpath\n\n @property\n def realpath(self):\n \"\"\"Get real path from cache before attempting to check for symlinks.\"\"\"\n if not hasattr(self, \"_store_tuple\"):\n return self._get_realpath()\n else:\n return self._store_tuple.realpath\n\n @property\n def store(self):\n \"\"\"Get translation store from dictionary cache, populate if store not\n already cached.\n \"\"\"\n 
self._update_store_cache()\n return self._store_tuple.store\n\n def _update_store_cache(self):\n \"\"\"Add translation store to dictionary cache, replace old cached\n version if needed.\n \"\"\"\n if self.exists():\n mod_info = self.getpomtime()\n else:\n mod_info = 0\n if (not hasattr(self, \"_store_tuple\") or\n self._store_tuple.mod_info != mod_info):\n try:\n self._store_tuple = self._store_cache[self.path]\n if self._store_tuple.mod_info != mod_info:\n # if file is modified act as if it doesn't exist in cache\n raise KeyError\n except KeyError:\n logging.debug(u\"Cache miss for %s\", self.path)\n from translate.storage import factory\n\n fileclass = self.instance.syncer.file_class\n classes = {\n str(self.instance.filetype.extension): fileclass,\n str(self.instance.filetype.template_extension): fileclass}\n store_obj = factory.getobject(self.path,\n ignore=self.field.ignore,\n classes=classes)\n self._store_tuple = StoreTuple(store_obj, mod_info,\n self.realpath)\n self._store_cache[self.path] = self._store_tuple\n\n def _touch_store_cache(self):\n \"\"\"Update stored mod_info without reparsing file.\"\"\"\n if hasattr(self, \"_store_tuple\"):\n mod_info = self.getpomtime()\n if self._store_tuple.mod_info != mod_info:\n self._store_tuple.mod_info = mod_info\n else:\n # FIXME: do we really need that?\n self._update_store_cache()\n\n def _delete_store_cache(self):\n \"\"\"Remove translation store from cache.\"\"\"\n try:\n del self._store_cache[self.path]\n except KeyError:\n pass\n\n try:\n del self._store_tuple\n except AttributeError:\n pass\n\n def exists(self):\n return os.path.exists(self.realpath)\n\n def savestore(self):\n \"\"\"Saves to temporary file then moves over original file. This way we\n avoid the need for locking.\n \"\"\"\n import shutil\n from pootle.core.utils import ptempfile as tempfile\n tmpfile, tmpfilename = tempfile.mkstemp(suffix=self.filename)\n os.close(tmpfile)\n self.store.savefile(tmpfilename)\n shutil.move(tmpfilename, self.realpath)\n self._touch_store_cache()\n\n def save(self, name, content, save=True):\n # FIXME: implement save to tmp file then move instead of directly\n # saving\n super(TranslationStoreFieldFile, self).save(name, content, save)\n self._delete_store_cache()\n\n def delete(self, save=True):\n self._delete_store_cache()\n if save:\n super(TranslationStoreFieldFile, self).delete(save)\n\n\nclass TranslationStoreField(FileField):\n \"\"\"This is the field class to represent a FileField in a model that\n represents a translation store.\n \"\"\"\n\n attr_class = TranslationStoreFieldFile\n\n def __init__(self, ignore=None, **kwargs):\n \"\"\"ignore: postfix to be stripped from filename when trying to\n determine file format for parsing, useful for .pending files\n \"\"\"\n self.ignore = ignore\n super(TranslationStoreField, self).__init__(**kwargs)\n\n def deconstruct(self):\n name, path, args, kwargs = super(TranslationStoreField,\n self).deconstruct()\n if self.ignore is not None:\n kwargs['ignore'] = self.ignore\n return name, path, args, kwargs\n", "path": "pootle/apps/pootle_store/fields.py"}]} | 2,935 | 507 |
gh_patches_debug_14953 | rasdani/github-patches | git_diff | python-poetry__poetry-1909 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Setting credentials through env. variables is not working
<!-- Checked checkbox should look like this: [x] -->
- [ x ] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [ x ] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [ x ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **OS version and name**: *nix
- **Poetry version**: Poetry version 1.0.0
## Issue
<!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ -->
I'm failing to use env. variables to set custom pypi credentials.
My pyproject.toml contains private pypi's like this:
```toml
[[tool.poetry.source]]
url = "https://XXXXX/nexus/repository/pypi-central/simple"
name = "nexus"
```
I'm running this script:
```sh
export POETRY_HTTP_BASIC_NEXUS_USERNAME=****
export POETRY_HTTP_BASIC_NEXUS_PASSWORD=****
poetry install
```
and it fails with:
```
[EnvCommandError]
Command ['/opt/.cache/pypoetry/virtualenvs/YYYY-4zvP7SOo-py3.8/bin/pip', 'install', '--no-deps', '--index-url', 'https://XXXXX/nexus/repository/pypi-central/simple', '--extra-index-url', 'https://pypi.org/', 'six==1.12.0'] errored with the following return code 2, and output:
Looking in indexes: https://RESOLVED-XXXXX/nexus/repository/pypi-central/simple, https://****:****@XXXXX/nexus/repository/epd-pypi/simple, https://pypi.org/
Collecting six==1.12.0
....
File "/opt/.cache/pypoetry/virtualenvs/YYYY-4zvP7SOo-py3.8/lib/python3.8/site-packages/pip/_internal/download.py", line 386, in handle_401
username, password, save = self._prompt_for_password(parsed.netloc)
File "/opt/.cache/pypoetry/virtualenvs/YYYY-4zvP7SOo-py3.8/lib/python3.8/site-packages/pip/_internal/download.py", line 358, in _prompt_for_password
username = ask_input("User for %s: " % netloc)
File "/opt/.cache/pypoetry/virtualenvs/YYYY-4zvP7SOo-py3.8/lib/python3.8/site-packages/pip/_internal/utils/misc.py", line 281, in ask_input
return input(message)
EOFError: EOF when reading a line
User for XXXXX:
```
I investigated the code and it seems that credentials are never acquired separately from config, but always as a pair. That means the code never asks for `http-basic.nexus.password` and `http-basic.nexus.username`, but only for `http-basic.nexus`, and the value is then used as a dict (search for `password_manager.get_http_auth` usage). I could not find a single test case, so I wrote one; feel free to use it:
```diff
diff --git a/tests/config/test_config.py b/tests/config/test_config.py
index 07373ad..72ad236 100644
--- a/tests/config/test_config.py
+++ b/tests/config/test_config.py
@@ -14,3 +14,13 @@ def test_config_get_from_environment_variable(config, environ):
os.environ["POETRY_VIRTUALENVS_CREATE"] = "false"
assert not config.get("virtualenvs.create")
+
+def test_basic_http_credentials_through_env(config, environ):
+ assert config.get("http-basic.test_repo") is None
+
+ os.environ["POETRY_HTTP_BASIC_TEST_REPO_USERNAME"] = "foo"
+ os.environ["POETRY_HTTP_BASIC_TEST_REPO_PASSWORD"] = "bar"
+ credentials = config.get("http-basic.test-repo")
+ assert credentials is not None
+ assert credentials["username"] == "foo"
+ assert credentials["password"] == "bar"
```
</issue>
<code>
[start of poetry/utils/password_manager.py]
1 import logging
2
3
4 logger = logging.getLogger(__name__)
5
6
7 class PasswordManagerError(Exception):
8
9 pass
10
11
12 class KeyRingError(Exception):
13
14 pass
15
16
17 class KeyRing:
18 def __init__(self, namespace):
19 self._namespace = namespace
20 self._is_available = True
21
22 self._check()
23
24 def is_available(self):
25 return self._is_available
26
27 def get_password(self, name, username):
28 if not self.is_available():
29 return
30
31 import keyring
32 import keyring.errors
33
34 name = self.get_entry_name(name)
35
36 try:
37 return keyring.get_password(name, username)
38 except (RuntimeError, keyring.errors.KeyringError):
39 raise KeyRingError(
40 "Unable to retrieve the password for {} from the key ring".format(name)
41 )
42
43 def set_password(self, name, username, password):
44 if not self.is_available():
45 return
46
47 import keyring
48 import keyring.errors
49
50 name = self.get_entry_name(name)
51
52 try:
53 keyring.set_password(name, username, password)
54 except (RuntimeError, keyring.errors.KeyringError) as e:
55 raise KeyRingError(
56 "Unable to store the password for {} in the key ring: {}".format(
57 name, str(e)
58 )
59 )
60
61 def delete_password(self, name, username):
62 if not self.is_available():
63 return
64
65 import keyring
66 import keyring.errors
67
68 name = self.get_entry_name(name)
69
70 try:
71 keyring.delete_password(name, username)
72 except (RuntimeError, keyring.errors.KeyringError):
73 raise KeyRingError(
74 "Unable to delete the password for {} from the key ring".format(name)
75 )
76
77 def get_entry_name(self, name):
78 return "{}-{}".format(self._namespace, name)
79
80 def _check(self):
81 try:
82 import keyring
83 except Exception as e:
84 logger.debug("An error occurred while importing keyring: {}".format(str(e)))
85 self._is_available = False
86
87 return
88
89 backend = keyring.get_keyring()
90 name = backend.name.split(" ")[0]
91 if name == "fail":
92 logger.debug("No suitable keyring backend found")
93 self._is_available = False
94 elif "plaintext" in backend.name.lower():
95 logger.debug("Only a plaintext keyring backend is available. Not using it.")
96 self._is_available = False
97 elif name == "chainer":
98 try:
99 import keyring.backend
100
101 backends = keyring.backend.get_all_keyring()
102
103 self._is_available = any(
104 [
105 b.name.split(" ")[0] not in ["chainer", "fail"]
106 and "plaintext" not in b.name.lower()
107 for b in backends
108 ]
109 )
110 except Exception:
111 self._is_available = False
112
113 if not self._is_available:
114 logger.warning("No suitable keyring backends were found")
115
116
117 class PasswordManager:
118 def __init__(self, config):
119 self._config = config
120 self._keyring = None
121
122 @property
123 def keyring(self):
124 if self._keyring is None:
125 self._keyring = KeyRing("poetry-repository")
126 if not self._keyring.is_available():
127 logger.warning(
128 "Using a plaintext file to store and retrieve credentials"
129 )
130
131 return self._keyring
132
133 def set_pypi_token(self, name, token):
134 if not self.keyring.is_available():
135 self._config.auth_config_source.add_property(
136 "pypi-token.{}".format(name), token
137 )
138 else:
139 self.keyring.set_password(name, "__token__", token)
140
141 def get_pypi_token(self, name):
142 if not self.keyring.is_available():
143 return self._config.get("pypi-token.{}".format(name))
144
145 return self.keyring.get_password(name, "__token__")
146
147 def delete_pypi_token(self, name):
148 if not self.keyring.is_available():
149 return self._config.auth_config_source.remove_property(
150 "pypi-token.{}".format(name)
151 )
152
153 self.keyring.delete_password(name, "__token__")
154
155 def get_http_auth(self, name):
156 auth = self._config.get("http-basic.{}".format(name))
157 if not auth:
158 return None
159
160 username, password = auth["username"], auth.get("password")
161 if password is None:
162 password = self.keyring.get_password(name, username)
163
164 return {
165 "username": username,
166 "password": password,
167 }
168
169 def set_http_password(self, name, username, password):
170 auth = {"username": username}
171
172 if not self.keyring.is_available():
173 auth["password"] = password
174 else:
175 self.keyring.set_password(name, username, password)
176
177 self._config.auth_config_source.add_property("http-basic.{}".format(name), auth)
178
179 def delete_http_password(self, name):
180 auth = self.get_http_auth(name)
181 if not auth or "username" not in auth:
182 return
183
184 try:
185 self.keyring.delete_password(name, auth["username"])
186 except KeyRingError:
187 pass
188
189 self._config.auth_config_source.remove_property("http-basic.{}".format(name))
190
[end of poetry/utils/password_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/utils/password_manager.py b/poetry/utils/password_manager.py
--- a/poetry/utils/password_manager.py
+++ b/poetry/utils/password_manager.py
@@ -155,11 +155,14 @@
def get_http_auth(self, name):
auth = self._config.get("http-basic.{}".format(name))
if not auth:
- return None
-
- username, password = auth["username"], auth.get("password")
- if password is None:
- password = self.keyring.get_password(name, username)
+ username = self._config.get("http-basic.{}.username".format(name))
+ password = self._config.get("http-basic.{}.password".format(name))
+ if not username and not password:
+ return None
+ else:
+ username, password = auth["username"], auth.get("password")
+ if password is None:
+ password = self.keyring.get_password(name, username)
return {
"username": username,
| {"golden_diff": "diff --git a/poetry/utils/password_manager.py b/poetry/utils/password_manager.py\n--- a/poetry/utils/password_manager.py\n+++ b/poetry/utils/password_manager.py\n@@ -155,11 +155,14 @@\n def get_http_auth(self, name):\n auth = self._config.get(\"http-basic.{}\".format(name))\n if not auth:\n- return None\n-\n- username, password = auth[\"username\"], auth.get(\"password\")\n- if password is None:\n- password = self.keyring.get_password(name, username)\n+ username = self._config.get(\"http-basic.{}.username\".format(name))\n+ password = self._config.get(\"http-basic.{}.password\".format(name))\n+ if not username and not password:\n+ return None\n+ else:\n+ username, password = auth[\"username\"], auth.get(\"password\")\n+ if password is None:\n+ password = self.keyring.get_password(name, username)\n \n return {\n \"username\": username,\n", "issue": "Setting credentials through env. variable are not working\n<!-- Checked checkbox should look like this: [x] -->\r\n- [ x ] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [ x ] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [ x ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: *nix\r\n- **Poetry version**: Poetry version 1.0.0\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\nI'm failing to use env. variables to set custom pypi credentials. \r\n\r\nMy pyproject.toml contains private pypi's like this:\r\n```toml\r\n[[tool.poetry.source]]\r\nurl = \"https://XXXXX/nexus/repository/pypi-central/simple\"\r\nname = \"nexus\"\r\n```\r\nI'm running this script:\r\n```sh\r\nexport POETRY_HTTP_BASIC_NEXUS_USERNAME=****\r\nexport POETRY_HTTP_BASIC_NEXUS_PASSWORD=****\r\npoetry install\r\n```\r\nand it fails with:\r\n```\r\n[EnvCommandError]\r\nCommand ['/opt/.cache/pypoetry/virtualenvs/YYYY-4zvP7SOo-py3.8/bin/pip', 'install', '--no-deps', '--index-url', 'https://XXXXX/nexus/repository/pypi-central/simple', '--extra-index-url', 'https://pypi.org/', 'six==1.12.0'] errored with the following return code 2, and output: \r\nLooking in indexes: https://RESOLVED-XXXXX/nexus/repository/pypi-central/simple, https://****:****@XXXXX/nexus/repository/epd-pypi/simple, https://pypi.org/\r\nCollecting six==1.12.0\r\n\r\n....\r\n\r\nFile \"/opt/.cache/pypoetry/virtualenvs/YYYY-4zvP7SOo-py3.8/lib/python3.8/site-packages/pip/_internal/download.py\", line 386, in handle_401\r\n username, password, save = self._prompt_for_password(parsed.netloc)\r\n File \"/opt/.cache/pypoetry/virtualenvs/YYYY-4zvP7SOo-py3.8/lib/python3.8/site-packages/pip/_internal/download.py\", line 358, in _prompt_for_password\r\n username = ask_input(\"User for %s: \" % netloc)\r\n File \"/opt/.cache/pypoetry/virtualenvs/YYYY-4zvP7SOo-py3.8/lib/python3.8/site-packages/pip/_internal/utils/misc.py\", line 281, in ask_input\r\n return input(message)\r\nEOFError: EOF when reading a line\r\nUser for XXXXX: \r\n```\r\n\r\nI investigated the code and it seems that credentials are never acquired separately from config, but always as a pair. 
That means that code never ask for `http-basic.nexus.password` and `http-basic.nexus.username`, but for `http-basic.nexus` then the value is used as a dict (search for `password_manager.get_http_auth` usage). I could not find single test case, so I wrote one, fill free to use it:\r\n```diff\r\ndiff --git a/tests/config/test_config.py b/tests/config/test_config.py\r\nindex 07373ad..72ad236 100644\r\n--- a/tests/config/test_config.py\r\n+++ b/tests/config/test_config.py\r\n@@ -14,3 +14,13 @@ def test_config_get_from_environment_variable(config, environ):\r\n\r\n os.environ[\"POETRY_VIRTUALENVS_CREATE\"] = \"false\"\r\n assert not config.get(\"virtualenvs.create\")\r\n+\r\n+def test_basic_http_credentials_through_env(config, environ):\r\n+ assert config.get(\"http-basic.test_repo\") is None\r\n+\r\n+ os.environ[\"POETRY_HTTP_BASIC_TEST_REPO_USERNAME\"] = \"foo\"\r\n+ os.environ[\"POETRY_HTTP_BASIC_TEST_REPO_PASSWORD\"] = \"bar\"\r\n+ credentials = config.get(\"http-basic.test-repo\")\r\n+ assert credentials is not None\r\n+ assert credentials[\"username\"] == \"foo\"\r\n+ assert credentials[\"password\"] == \"bar\"\r\n```\n", "before_files": [{"content": "import logging\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PasswordManagerError(Exception):\n\n pass\n\n\nclass KeyRingError(Exception):\n\n pass\n\n\nclass KeyRing:\n def __init__(self, namespace):\n self._namespace = namespace\n self._is_available = True\n\n self._check()\n\n def is_available(self):\n return self._is_available\n\n def get_password(self, name, username):\n if not self.is_available():\n return\n\n import keyring\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n return keyring.get_password(name, username)\n except (RuntimeError, keyring.errors.KeyringError):\n raise KeyRingError(\n \"Unable to retrieve the password for {} from the key ring\".format(name)\n )\n\n def set_password(self, name, username, password):\n if not self.is_available():\n return\n\n import keyring\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n keyring.set_password(name, username, password)\n except (RuntimeError, keyring.errors.KeyringError) as e:\n raise KeyRingError(\n \"Unable to store the password for {} in the key ring: {}\".format(\n name, str(e)\n )\n )\n\n def delete_password(self, name, username):\n if not self.is_available():\n return\n\n import keyring\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n keyring.delete_password(name, username)\n except (RuntimeError, keyring.errors.KeyringError):\n raise KeyRingError(\n \"Unable to delete the password for {} from the key ring\".format(name)\n )\n\n def get_entry_name(self, name):\n return \"{}-{}\".format(self._namespace, name)\n\n def _check(self):\n try:\n import keyring\n except Exception as e:\n logger.debug(\"An error occurred while importing keyring: {}\".format(str(e)))\n self._is_available = False\n\n return\n\n backend = keyring.get_keyring()\n name = backend.name.split(\" \")[0]\n if name == \"fail\":\n logger.debug(\"No suitable keyring backend found\")\n self._is_available = False\n elif \"plaintext\" in backend.name.lower():\n logger.debug(\"Only a plaintext keyring backend is available. 
Not using it.\")\n self._is_available = False\n elif name == \"chainer\":\n try:\n import keyring.backend\n\n backends = keyring.backend.get_all_keyring()\n\n self._is_available = any(\n [\n b.name.split(\" \")[0] not in [\"chainer\", \"fail\"]\n and \"plaintext\" not in b.name.lower()\n for b in backends\n ]\n )\n except Exception:\n self._is_available = False\n\n if not self._is_available:\n logger.warning(\"No suitable keyring backends were found\")\n\n\nclass PasswordManager:\n def __init__(self, config):\n self._config = config\n self._keyring = None\n\n @property\n def keyring(self):\n if self._keyring is None:\n self._keyring = KeyRing(\"poetry-repository\")\n if not self._keyring.is_available():\n logger.warning(\n \"Using a plaintext file to store and retrieve credentials\"\n )\n\n return self._keyring\n\n def set_pypi_token(self, name, token):\n if not self.keyring.is_available():\n self._config.auth_config_source.add_property(\n \"pypi-token.{}\".format(name), token\n )\n else:\n self.keyring.set_password(name, \"__token__\", token)\n\n def get_pypi_token(self, name):\n if not self.keyring.is_available():\n return self._config.get(\"pypi-token.{}\".format(name))\n\n return self.keyring.get_password(name, \"__token__\")\n\n def delete_pypi_token(self, name):\n if not self.keyring.is_available():\n return self._config.auth_config_source.remove_property(\n \"pypi-token.{}\".format(name)\n )\n\n self.keyring.delete_password(name, \"__token__\")\n\n def get_http_auth(self, name):\n auth = self._config.get(\"http-basic.{}\".format(name))\n if not auth:\n return None\n\n username, password = auth[\"username\"], auth.get(\"password\")\n if password is None:\n password = self.keyring.get_password(name, username)\n\n return {\n \"username\": username,\n \"password\": password,\n }\n\n def set_http_password(self, name, username, password):\n auth = {\"username\": username}\n\n if not self.keyring.is_available():\n auth[\"password\"] = password\n else:\n self.keyring.set_password(name, username, password)\n\n self._config.auth_config_source.add_property(\"http-basic.{}\".format(name), auth)\n\n def delete_http_password(self, name):\n auth = self.get_http_auth(name)\n if not auth or \"username\" not in auth:\n return\n\n try:\n self.keyring.delete_password(name, auth[\"username\"])\n except KeyRingError:\n pass\n\n self._config.auth_config_source.remove_property(\"http-basic.{}\".format(name))\n", "path": "poetry/utils/password_manager.py"}]} | 3,140 | 225 |
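
Editorial note on the entry above: the accepted diff makes `get_http_auth` fall back to the per-field settings `http-basic.<repo>.username` and `http-basic.<repo>.password`, which are the keys that environment variables such as `POETRY_HTTP_BASIC_NEXUS_USERNAME` map onto in Poetry's layered config. The sketch below only illustrates that fallback shape and is not Poetry's real implementation: the plain dict and the direct `os.environ` lookup stand in for Poetry's `Config` object, and keyring handling is omitted.

```python
import os
from typing import Optional


def _env_key(setting: str) -> str:
    # "http-basic.nexus.username" -> "POETRY_HTTP_BASIC_NEXUS_USERNAME"
    return "POETRY_" + setting.replace("-", "_").replace(".", "_").upper()


def get_http_auth(name: str, config: dict) -> Optional[dict]:
    def lookup(setting: str) -> Optional[str]:
        # Environment variables win over the (stand-in) file-based config.
        return os.environ.get(_env_key(setting), config.get(setting))

    auth = config.get("http-basic.{}".format(name))
    if not auth:
        # Fall back to the individual keys; this is the path that lets
        # POETRY_HTTP_BASIC_NEXUS_USERNAME / _PASSWORD work on their own.
        username = lookup("http-basic.{}.username".format(name))
        password = lookup("http-basic.{}.password".format(name))
        if not username and not password:
            return None
    else:
        username, password = auth["username"], auth.get("password")
    return {"username": username, "password": password}


if __name__ == "__main__":
    os.environ["POETRY_HTTP_BASIC_NEXUS_USERNAME"] = "foo"
    os.environ["POETRY_HTTP_BASIC_NEXUS_PASSWORD"] = "bar"
    print(get_http_auth("nexus", {}))  # {'username': 'foo', 'password': 'bar'}
```
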
gh_patches_debug_24821 | rasdani/github-patches | git_diff | nilearn__nilearn-3077 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nilearn.plotting overrides the matplotlib backend, causing troubles on remote SSH development
I am working in a remote setting, accessing a machine (drago/margaret in the INRIA context) through a PyCharm Remote Interpreter.
I have been struggling to display nilearn figures in the scientific view of PyCharm, which leads to a painful process of saving the image => rsync the figures dir => visualize the figures once they are on my personal laptop.
I did a simple experiment, first drawing a plain `plot(list(range(10)))`, which is displayed, and then a nilearn figure, which does not show up (script at the end of the issue).
I found out the reason for this, which is that in `nilearn.plotting.__init__`, the matplotlib backend is set to `agg` if not in `[inline, agg]`, but the backend used initially (which is working for me) is `module://backend_interagg`.
Setting the backend to the initial value after importing `nilearn.datasets` fixes the problem.
I don't know exactly how those matplotlib backends work, whether the list of accepted backends in `nilearn.plotting.__init__` could be extended, or whether we could have a more robust list of valid matplotlib backends. Also, I feel that nilearn shouldn't override the matplotlib backend silently.
<!--Please fill in the following information, to the best of your ability.-->
Nilearn version: 0.7.1
### Expected behavior
Nilearn does not override matplotlib backend silently.
### Actual behavior
Nilearn sets backend to "agg" if not in ["inline", "agg"], causing trouble with remote SSH development.
### Steps and code to reproduce bug
Run the following script through SSH remote interpreter
```python
import matplotlib
initial_backend = matplotlib.get_backend().lower()
print(initial_backend)
import matplotlib.pyplot as plt
import numpy as np
arr = np.zeros((100, 100))
plt.figure()
plt.plot(list(range(10)))
plt.show()
# >> The show is displayed in PyCharm
from nilearn import datasets, plotting
print(matplotlib.get_backend().lower())
# >> Backend has switched to "agg"
sample_brain_map = datasets.fetch_neurovault_motor_task().images[0]
plotting.plot_stat_map(
sample_brain_map,
threshold=3,
title="Before setting back the backend",
)
plt.show()
# >> Does not show up
matplotlib.use(initial_backend)
plotting.plot_stat_map(
sample_brain_map,
threshold=3,
title="After setting back the backend",
)
plt.show()
# >> Shows up
```
</issue>
<code>
[start of nilearn/plotting/__init__.py]
1 """
2 Plotting code for nilearn
3 """
4 # Original Authors: Chris Filo Gorgolewski, Gael Varoquaux
5 import os
6 import sys
7 import importlib
8
9
10 ###############################################################################
11 # Make sure that we don't get DISPLAY problems when running without X on
12 # unices
13 def _set_mpl_backend():
14 # We are doing local imports here to avoid polluting our namespace
15 try:
16 import matplotlib
17 except ImportError:
18 if importlib.util.find_spec("pytest") is not None:
19 from .._utils.testing import skip_if_running_tests
20 # No need to fail when running tests
21 skip_if_running_tests('matplotlib not installed')
22 raise
23 else:
24 from ..version import (_import_module_with_version_check,
25 OPTIONAL_MATPLOTLIB_MIN_VERSION)
26 # When matplotlib was successfully imported we need to check
27 # that the version is greater that the minimum required one
28 _import_module_with_version_check('matplotlib',
29 OPTIONAL_MATPLOTLIB_MIN_VERSION)
30 current_backend = matplotlib.get_backend().lower()
31
32 if 'inline' in current_backend or 'nbagg' in current_backend:
33 return
34 # Set the backend to a non-interactive one for unices without X
35 # (see gh-2560)
36 if (sys.platform not in ('darwin', 'win32') and
37 'DISPLAY' not in os.environ):
38 matplotlib.use('Agg')
39
40
41 _set_mpl_backend()
42
43 ###############################################################################
44 from . import cm
45 from .img_plotting import (
46 plot_img, plot_anat, plot_epi, plot_roi, plot_stat_map,
47 plot_glass_brain, plot_connectome, plot_connectome_strength,
48 plot_markers, plot_prob_atlas, plot_carpet, plot_img_comparison, show)
49 from .find_cuts import find_xyz_cut_coords, find_cut_slices, \
50 find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords
51 from .matrix_plotting import (plot_matrix, plot_contrast_matrix,
52 plot_design_matrix, plot_event)
53 from .html_surface import view_surf, view_img_on_surf
54 from .html_stat_map import view_img
55 from .html_connectome import view_connectome, view_markers
56 from .surf_plotting import (plot_surf, plot_surf_stat_map, plot_surf_roi,
57 plot_img_on_surf, plot_surf_contours)
58
59 __all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',
60 'plot_roi', 'plot_stat_map', 'plot_glass_brain',
61 'plot_markers', 'plot_connectome', 'plot_prob_atlas',
62 'find_xyz_cut_coords', 'find_cut_slices',
63 'plot_img_comparison',
64 'show', 'plot_matrix',
65 'plot_design_matrix', 'plot_contrast_matrix', 'plot_event',
66 'view_surf', 'view_img_on_surf',
67 'view_img', 'view_connectome', 'view_markers',
68 'find_parcellation_cut_coords',
69 'find_probabilistic_atlas_cut_coords',
70 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi',
71 'plot_img_on_surf', 'plot_connectome_strength', 'plot_carpet',
72 'plot_surf_contours']
73
[end of nilearn/plotting/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py
--- a/nilearn/plotting/__init__.py
+++ b/nilearn/plotting/__init__.py
@@ -2,9 +2,8 @@
Plotting code for nilearn
"""
# Original Authors: Chris Filo Gorgolewski, Gael Varoquaux
-import os
-import sys
import importlib
+import warnings
###############################################################################
@@ -29,13 +28,17 @@
OPTIONAL_MATPLOTLIB_MIN_VERSION)
current_backend = matplotlib.get_backend().lower()
- if 'inline' in current_backend or 'nbagg' in current_backend:
- return
- # Set the backend to a non-interactive one for unices without X
- # (see gh-2560)
- if (sys.platform not in ('darwin', 'win32') and
- 'DISPLAY' not in os.environ):
- matplotlib.use('Agg')
+ try:
+ # Making sure the current backend is usable by matplotlib
+ matplotlib.use(current_backend)
+ except Exception:
+ # If not, switching to default agg backend
+ matplotlib.use("Agg")
+ new_backend = matplotlib.get_backend().lower()
+
+ if new_backend != current_backend:
+ # Matplotlib backend has been changed, let's warn the user
+ warnings.warn(f"Backend changed to {new_backend}...")
_set_mpl_backend()
| {"golden_diff": "diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py\n--- a/nilearn/plotting/__init__.py\n+++ b/nilearn/plotting/__init__.py\n@@ -2,9 +2,8 @@\n Plotting code for nilearn\n \"\"\"\n # Original Authors: Chris Filo Gorgolewski, Gael Varoquaux\n-import os\n-import sys\n import importlib\n+import warnings\n \n \n ###############################################################################\n@@ -29,13 +28,17 @@\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n current_backend = matplotlib.get_backend().lower()\n \n- if 'inline' in current_backend or 'nbagg' in current_backend:\n- return\n- # Set the backend to a non-interactive one for unices without X\n- # (see gh-2560)\n- if (sys.platform not in ('darwin', 'win32') and\n- 'DISPLAY' not in os.environ):\n- matplotlib.use('Agg')\n+ try:\n+ # Making sure the current backend is usable by matplotlib\n+ matplotlib.use(current_backend)\n+ except Exception:\n+ # If not, switching to default agg backend\n+ matplotlib.use(\"Agg\")\n+ new_backend = matplotlib.get_backend().lower()\n+\n+ if new_backend != current_backend:\n+ # Matplotlib backend has been changed, let's warn the user\n+ warnings.warn(f\"Backend changed to {new_backend}...\")\n \n \n _set_mpl_backend()\n", "issue": "nilearn.plotting overrides the matplotlib backend, causing troubles on remote SSH development\nI am working on a remote settings, accessing a machine (drago/margaret for INRIA context) through a PyCharm Remote Interpreter.\r\nI have been struggling to display nilearn figures in the scientific view of PyCharm, leading to a painful process of saving the image => rsync figures dir => visualize figures once there are on personal laptop.\r\n\r\nI did a simple experiment drawing first a simple `plot(list(range(10)))` which is displayed and then a nilearn figure which does not show up (script at the end of the issue).\r\n\r\nI found out the reason for this, which is that in `nilearn.plotting.__init__`, the matplotlib backend is set to `agg` if not in `[inline, agg]`, but the backend used initially (which is working for me) is `module://backend_interagg`.\r\n\r\nSetting the backend to the initial value after importing `nilearn.datasets` fixes the problem.\r\n\r\nI don't know exactly how those matplotlib backends work, if the list of accepted backends in `nilearn.datasets.__init__` could be extended or if we could have a more robust list of valid matplotlib backend. Also, I feel that nilearn shouldn't override matplotlib backend silently? 
\r\n\r\n<!--Please fill in the following information, to the best of your ability.-->\r\nNilearn version: 0.7.1\r\n\r\n### Expected behavior\r\n\r\nNilearn does not override matplotlib backend silently.\r\n\r\n### Actual behavior\r\n\r\nNilearn sets backend to \"agg\" if not in [\"inline\", \"agg\"], causing trouble with remote SSH development.\r\n\r\n### Steps and code to reproduce bug\r\n\r\nRun the following script through SSH remote interpreter\r\n\r\n```python\r\nimport matplotlib\r\ninitial_backend = matplotlib.get_backend().lower()\r\nprint(initial_backend)\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\narr = np.zeros((100, 100))\r\nplt.figure()\r\nplt.plot(list(range(10)))\r\nplt.show()\r\n# >> The show is displayed in PyCharm\r\n\r\nfrom nilearn import datasets, plotting\r\nprint(matplotlib.get_backend().lower())\r\n# >> Backend has switched to \"agg\"\r\n\r\nsample_brain_map = datasets.fetch_neurovault_motor_task().images[0]\r\nplotting.plot_stat_map(\r\n sample_brain_map,\r\n threshold=3,\r\n title=\"Before setting back the backend\",\r\n)\r\n\r\nplt.show()\r\n# >> Does not show up\r\n\r\nmatplotlib.use(initial_backend)\r\nplotting.plot_stat_map(\r\n sample_brain_map,\r\n threshold=3,\r\n title=\"After setting back the backend\",\r\n)\r\n\r\nplt.show()\r\n# >> Shows up\r\n\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nPlotting code for nilearn\n\"\"\"\n# Original Authors: Chris Filo Gorgolewski, Gael Varoquaux\nimport os\nimport sys\nimport importlib\n\n\n###############################################################################\n# Make sure that we don't get DISPLAY problems when running without X on\n# unices\ndef _set_mpl_backend():\n # We are doing local imports here to avoid polluting our namespace\n try:\n import matplotlib\n except ImportError:\n if importlib.util.find_spec(\"pytest\") is not None:\n from .._utils.testing import skip_if_running_tests\n # No need to fail when running tests\n skip_if_running_tests('matplotlib not installed')\n raise\n else:\n from ..version import (_import_module_with_version_check,\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n # When matplotlib was successfully imported we need to check\n # that the version is greater that the minimum required one\n _import_module_with_version_check('matplotlib',\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n current_backend = matplotlib.get_backend().lower()\n\n if 'inline' in current_backend or 'nbagg' in current_backend:\n return\n # Set the backend to a non-interactive one for unices without X\n # (see gh-2560)\n if (sys.platform not in ('darwin', 'win32') and\n 'DISPLAY' not in os.environ):\n matplotlib.use('Agg')\n\n\n_set_mpl_backend()\n\n###############################################################################\nfrom . 
import cm\nfrom .img_plotting import (\n plot_img, plot_anat, plot_epi, plot_roi, plot_stat_map,\n plot_glass_brain, plot_connectome, plot_connectome_strength,\n plot_markers, plot_prob_atlas, plot_carpet, plot_img_comparison, show)\nfrom .find_cuts import find_xyz_cut_coords, find_cut_slices, \\\n find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\nfrom .matrix_plotting import (plot_matrix, plot_contrast_matrix,\n plot_design_matrix, plot_event)\nfrom .html_surface import view_surf, view_img_on_surf\nfrom .html_stat_map import view_img\nfrom .html_connectome import view_connectome, view_markers\nfrom .surf_plotting import (plot_surf, plot_surf_stat_map, plot_surf_roi,\n plot_img_on_surf, plot_surf_contours)\n\n__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',\n 'plot_roi', 'plot_stat_map', 'plot_glass_brain',\n 'plot_markers', 'plot_connectome', 'plot_prob_atlas',\n 'find_xyz_cut_coords', 'find_cut_slices',\n 'plot_img_comparison',\n 'show', 'plot_matrix',\n 'plot_design_matrix', 'plot_contrast_matrix', 'plot_event',\n 'view_surf', 'view_img_on_surf',\n 'view_img', 'view_connectome', 'view_markers',\n 'find_parcellation_cut_coords',\n 'find_probabilistic_atlas_cut_coords',\n 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi',\n 'plot_img_on_surf', 'plot_connectome_strength', 'plot_carpet',\n 'plot_surf_contours']\n", "path": "nilearn/plotting/__init__.py"}]} | 1,923 | 337 |
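
Editorial note on the entry above: the accepted fix stops special-casing backend names and instead probes whether the current backend can actually be activated, falling back to `Agg` and warning only when a switch really happens. The standalone sketch below restates that probe outside of nilearn so it can be run on its own; the matplotlib version check and the pytest-skip path of the real `_set_mpl_backend` are deliberately left out.

```python
import warnings

import matplotlib


def probe_backend(preferred: str) -> str:
    """Return a usable matplotlib backend name, preferring `preferred`."""
    try:
        # matplotlib raises if the backend cannot be set up, e.g. a GUI
        # backend on a headless SSH session without X.
        matplotlib.use(preferred)
    except Exception:
        matplotlib.use("Agg")

    chosen = matplotlib.get_backend()
    if chosen.lower() != preferred.lower():
        # Warn instead of switching silently, which was the complaint here.
        warnings.warn(f"Backend changed to {chosen}...")
    return chosen


if __name__ == "__main__":
    print(probe_backend(matplotlib.get_backend()))
```
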
gh_patches_debug_130 | rasdani/github-patches | git_diff | svthalia__concrexit-1750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Event registration member serializer should be read only
### Describe the bug
https://github.com/svthalia/concrexit/blob/4ab37961f50e398cc52422cdc1df66f6ab8ff2ee/website/events/api/v2/serializers/event_registration.py#L34 This serializer should be read-only
### How to reproduce
https://staging.thalia.nu/api/v2/events/150/registrations/ shows that you can POST to update the member profile, which should not be the case
### Expected behaviour
Be read only
</issue>
<code>
[start of website/events/api/v2/serializers/event_registration.py]
1 from rest_framework import serializers
2
3 from events.models import EventRegistration
4 from members.api.v2.serializers.member import MemberSerializer
5
6
7 class EventRegistrationSerializer(serializers.ModelSerializer):
8 """Serializer for event registrations."""
9
10 def __init__(self, *args, **kwargs):
11 # Don't pass the 'fields' arg up to the superclass
12 fields = kwargs.pop("fields", {"pk", "member", "name"})
13
14 # Instantiate the superclass normally
15 super().__init__(*args, **kwargs)
16
17 allowed = set(fields)
18 existing = set(self.fields.keys())
19 for field_name in existing - allowed:
20 self.fields.pop(field_name)
21
22 class Meta:
23 model = EventRegistration
24 fields = (
25 "pk",
26 "present",
27 "queue_position",
28 "date",
29 "payment",
30 "member",
31 "name",
32 )
33
34 member = MemberSerializer(detailed=False)
35
[end of website/events/api/v2/serializers/event_registration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py
--- a/website/events/api/v2/serializers/event_registration.py
+++ b/website/events/api/v2/serializers/event_registration.py
@@ -31,4 +31,4 @@
"name",
)
- member = MemberSerializer(detailed=False)
+ member = MemberSerializer(detailed=False, read_only=True)
| {"golden_diff": "diff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py\n--- a/website/events/api/v2/serializers/event_registration.py\n+++ b/website/events/api/v2/serializers/event_registration.py\n@@ -31,4 +31,4 @@\n \"name\",\n )\n \n- member = MemberSerializer(detailed=False)\n+ member = MemberSerializer(detailed=False, read_only=True)\n", "issue": "Event registration member serializer should be read only\n### Describe the bug\r\nhttps://github.com/svthalia/concrexit/blob/4ab37961f50e398cc52422cdc1df66f6ab8ff2ee/website/events/api/v2/serializers/event_registration.py#L34 This serializer should be read-only\r\n\r\n### How to reproduce\r\nhttps://staging.thalia.nu/api/v2/events/150/registrations/ shows that you can POST to update the member profile, that should not be the case\r\n\r\n### Expected behaviour\r\nBe read only\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\n\n\nclass EventRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop(\"fields\", {\"pk\", \"member\", \"name\"})\n\n # Instantiate the superclass normally\n super().__init__(*args, **kwargs)\n\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n \"member\",\n \"name\",\n )\n\n member = MemberSerializer(detailed=False)\n", "path": "website/events/api/v2/serializers/event_registration.py"}]} | 935 | 103 |
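
Editorial note on the entry above: the one-line fix marks the nested `member` serializer as `read_only=True`, so the field is still rendered in responses but ignored during input validation. The snippet below is a self-contained illustration of that behaviour with plain (non-model) DRF serializers and an in-line `settings.configure()`; it is not concrexit's code, and the field names are invented for the example.

```python
import django
from django.conf import settings

settings.configure()  # minimal config so DRF serializers can be declared
django.setup()

from rest_framework import serializers


class MemberSerializer(serializers.Serializer):
    first_name = serializers.CharField()


class RegistrationSerializer(serializers.Serializer):
    name = serializers.CharField()
    # read_only=True: rendered on output, ignored on input, so a POST body
    # cannot smuggle in member data.
    member = MemberSerializer(read_only=True)


payload = {"name": "event", "member": {"first_name": "intruder"}}
s = RegistrationSerializer(data=payload)
s.is_valid(raise_exception=True)
print(s.validated_data)  # {'name': 'event'} - no 'member' key
```
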
gh_patches_debug_32911 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2745 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CA not running with new image
It seems like CA isn't running right now with the switch to Alpine; we need to investigate.
</issue>
<code>
[start of openstates/ca/models.py]
1 from sqlalchemy import (Column, Integer, String, ForeignKey,
2 DateTime, Numeric, UnicodeText)
3 from sqlalchemy.sql import and_
4 from sqlalchemy.orm import backref, relation
5 from sqlalchemy.ext.declarative import declarative_base
6
7 from lxml import etree
8
9 Base = declarative_base()
10
11
12 class CABill(Base):
13 __tablename__ = "bill_tbl"
14
15 bill_id = Column(String(20), primary_key=True)
16 session_year = Column(String(8))
17 session_num = Column(String(2))
18 measure_type = Column(String(4))
19 measure_num = Column(Integer)
20 measure_state = Column(String(40))
21 chapter_year = Column(String(4))
22 chapter_type = Column(String(10))
23 chapter_session_num = Column(String(2))
24 chapter_num = Column(String(10))
25 latest_bill_version_id = Column(String(30))
26 active_flg = Column(String(1))
27 trans_uid = Column(String(30))
28 trans_update = Column(DateTime)
29 current_location = Column(String(200))
30 current_secondary_loc = Column(String(60))
31 current_house = Column(String(60))
32 current_status = Column(String(60))
33
34 actions = relation('CABillAction', backref=backref('bill'),
35 order_by="CABillAction.bill_history_id")
36
37 versions = relation('CABillVersion', backref=backref('bill'),
38 order_by='desc(CABillVersion.version_num)')
39
40 votes = relation('CAVoteSummary', backref=backref('bill'),
41 order_by='CAVoteSummary.vote_date_time')
42
43 @property
44 def short_bill_id(self):
45 return "%s%d" % (self.measure_type, self.measure_num)
46
47
48 class CABillVersion(Base):
49 __tablename__ = "bill_version_tbl"
50
51 bill_version_id = Column(String(30), primary_key=True)
52 bill_id = Column(String(19), ForeignKey(CABill.bill_id))
53 version_num = Column(Integer)
54 bill_version_action_date = Column(DateTime)
55 bill_version_action = Column(String(100))
56 request_num = Column(String(10))
57 subject = Column(String(1000))
58 vote_required = Column(String(100))
59 appropriation = Column(String(3))
60 fiscal_committee = Column(String(3))
61 local_program = Column(String(3))
62 substantive_changes = Column(String(3))
63 urgency = Column(String(3))
64 taxlevy = Column(String(3))
65 bill_xml = Column(UnicodeText)
66 active_flg = Column(String(1))
67 trans_uid = Column(String(30))
68 trans_update = Column(DateTime)
69
70 @property
71 def xml(self):
72 if '_xml' not in self.__dict__:
73 self._xml = etree.fromstring(self.bill_xml.encode('utf-8'),
74 etree.XMLParser(recover=True))
75 return self._xml
76
77 @property
78 def title(self):
79 text = self.xml.xpath("string(//*[local-name() = 'Title'])") or ''
80 return text.strip()
81
82 @property
83 def short_title(self):
84 text = self.xml.xpath("string(//*[local-name() = 'Subject'])") or ''
85 return text.strip()
86
87
88 class CABillVersionAuthor(Base):
89 __tablename__ = "bill_version_authors_tbl"
90
91 # Note: the primary_keys here are a lie - the actual table has no pk
92 # but SQLAlchemy seems to demand one. Furthermore, I get strange
93 # exceptions when trying to use bill_version_id as part of a
94 # composite primary key.
95
96 bill_version_id = Column(String(30),
97 ForeignKey(CABillVersion.bill_version_id))
98 type = Column(String(15))
99 house = Column(String(100))
100 name = Column(String(100), primary_key=True)
101 contribution = Column(String(100))
102 committee_members = Column(String(2000))
103 active_flg = Column(String(1))
104 trans_uid = Column(String(30))
105 trans_update = Column(DateTime, primary_key=True)
106 primary_author_flg = Column(String(1))
107
108 version = relation(CABillVersion, backref=backref('authors'))
109
110
111 class CABillAction(Base):
112 __tablename__ = "bill_history_tbl"
113
114 bill_id = Column(String(20), ForeignKey(CABill.bill_id))
115 bill_history_id = Column(Numeric, primary_key=True)
116 action_date = Column(DateTime)
117 action = Column(String(2000))
118 trans_uid = Column(String(20))
119 trans_update_dt = Column(DateTime)
120 action_sequence = Column(Integer)
121 action_code = Column(String(5))
122 action_status = Column(String(60))
123 primary_location = Column(String(60))
124 secondary_location = Column(String(60))
125 ternary_location = Column(String(60))
126 end_status = Column(String(60))
127
128 @property
129 def actor(self):
130 # TODO: replace committee codes w/ names
131
132 if not self.primary_location:
133 return None
134
135 actor = self.primary_location
136
137 if self.secondary_location:
138 actor += " (%s" % self.secondary_location
139
140 if self.ternary_location:
141 actor += " %s" % self.ternary_location
142
143 actor += ")"
144
145 return actor
146
147
148 class CALegislator(Base):
149 __tablename__ = 'legislator_tbl'
150
151 district = Column(String(5), primary_key=True)
152 session_year = Column(String(8), primary_key=True)
153 legislator_name = Column(String(30), primary_key=True)
154 house_type = Column(String(1), primary_key=True)
155 author_name = Column(String(200))
156 first_name = Column(String(30))
157 last_name = Column(String(30))
158 middle_initial = Column(String(1))
159 name_suffix = Column(String(12))
160 name_title = Column(String(34))
161 web_name_title = Column(String(34))
162 party = Column(String(4))
163 active_flg = Column(String(1))
164 trans_uid = Column(String(30))
165 trans_update = Column(DateTime)
166
167
168 class CAMotion(Base):
169 __tablename__ = "bill_motion_tbl"
170
171 motion_id = Column(Integer, primary_key=True)
172 motion_text = Column(String(250))
173 trans_uid = Column(String(30))
174 trans_update = Column(DateTime)
175
176
177 class CALocation(Base):
178 __tablename__ = "location_code_tbl"
179
180 session_year = Column(String(8), primary_key=True)
181 location_code = Column(String(6), primary_key=True)
182 location_type = Column(String(1), primary_key=True)
183 consent_calendar_code = Column(String(2), primary_key=True)
184 description = Column(String(60))
185 long_description = Column(String(200))
186 active_flg = Column(String(1))
187 trans_uid = Column(String(30))
188 trans_update = Column(DateTime)
189
190
191 class CAVoteSummary(Base):
192 __tablename__ = "bill_summary_vote_tbl"
193
194 bill_id = Column(String(20), ForeignKey(CABill.bill_id), primary_key=True)
195 location_code = Column(String(6), ForeignKey(CALocation.location_code), primary_key=True)
196 vote_date_time = Column(DateTime, primary_key=True)
197 vote_date_seq = Column(Integer, primary_key=True)
198 motion_id = Column(Integer, ForeignKey(CAMotion.motion_id), primary_key=True)
199 ayes = Column(Integer)
200 noes = Column(Integer)
201 abstain = Column(Integer)
202 vote_result = Column(String(6))
203 trans_uid = Column(String(30))
204 trans_update = Column(DateTime, primary_key=True)
205
206 motion = relation(CAMotion)
207 location = relation(CALocation)
208
209 @property
210 def threshold(self):
211 # This may not always be true...
212 if self.location_code != "AFLOOR" and self.location_code != "SFLOOR":
213 return '1/2'
214
215 # Get the associated bill version (probably?)
216 version = next(filter(lambda v: v.bill_version_action_date <= self.vote_date_time,
217 self.bill.versions))
218
219 if version.vote_required == 'Majority':
220 return '1/2'
221 else:
222 return '2/3'
223
224
225 class CAVoteDetail(Base):
226 __tablename__ = "bill_detail_vote_tbl"
227
228 bill_id = Column(String(20), ForeignKey(CABill.bill_id),
229 ForeignKey(CAVoteSummary.bill_id), primary_key=True)
230 location_code = Column(String(6), ForeignKey(CAVoteSummary.location_code),
231 primary_key=True)
232 legislator_name = Column(String(50), primary_key=True)
233 vote_date_time = Column(DateTime, ForeignKey(CAVoteSummary.vote_date_time),
234 primary_key=True)
235 vote_date_seq = Column(Integer, ForeignKey(CAVoteSummary.vote_date_seq),
236 primary_key=True)
237 vote_code = Column(String(5), primary_key=True)
238 motion_id = Column(Integer, ForeignKey(CAVoteSummary.motion_id),
239 primary_key=True)
240 trans_uid = Column(String(30), primary_key=True)
241 trans_update = Column(DateTime, primary_key=True)
242
243 bill = relation(CABill, backref=backref('detail_votes'))
244 summary = relation(
245 CAVoteSummary,
246 primaryjoin=and_(CAVoteSummary.bill_id == bill_id,
247 CAVoteSummary.location_code == location_code,
248 CAVoteSummary.vote_date_time == vote_date_time,
249 CAVoteSummary.vote_date_seq == vote_date_seq,
250 CAVoteSummary.motion_id == motion_id),
251 backref=backref('votes'))
252
253
254 class CACommitteeHearing(Base):
255 __tablename__ = "committee_hearing_tbl"
256
257 bill_id = Column(String(20), ForeignKey(CABill.bill_id),
258 ForeignKey(CAVoteSummary.bill_id), primary_key=True)
259 committee_type = Column(String(2), primary_key=True)
260 committee_nr = Column(Integer, primary_key=True)
261 hearing_date = Column(DateTime, primary_key=True)
262 location_code = Column(String(6), primary_key=True)
263 trans_uid = Column(String(30), primary_key=True)
264 trans_update_date = Column(DateTime, primary_key=True)
265
266 bill = relation(CABill, backref=backref('committee_hearings'))
267
[end of openstates/ca/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/ca/models.py b/openstates/ca/models.py
--- a/openstates/ca/models.py
+++ b/openstates/ca/models.py
@@ -1,7 +1,7 @@
from sqlalchemy import (Column, Integer, String, ForeignKey,
DateTime, Numeric, UnicodeText)
from sqlalchemy.sql import and_
-from sqlalchemy.orm import backref, relation
+from sqlalchemy.orm import backref, relation, foreign
from sqlalchemy.ext.declarative import declarative_base
from lxml import etree
@@ -225,8 +225,7 @@
class CAVoteDetail(Base):
__tablename__ = "bill_detail_vote_tbl"
- bill_id = Column(String(20), ForeignKey(CABill.bill_id),
- ForeignKey(CAVoteSummary.bill_id), primary_key=True)
+ bill_id = Column(String(20), ForeignKey(CABill.bill_id), primary_key=True)
location_code = Column(String(6), ForeignKey(CAVoteSummary.location_code),
primary_key=True)
legislator_name = Column(String(50), primary_key=True)
@@ -240,10 +239,12 @@
trans_uid = Column(String(30), primary_key=True)
trans_update = Column(DateTime, primary_key=True)
- bill = relation(CABill, backref=backref('detail_votes'))
+ bill = relation(CABill,
+ primaryjoin="CABill.bill_id == foreign(CAVoteDetail.bill_id)",
+ backref=backref('detail_votes'))
summary = relation(
CAVoteSummary,
- primaryjoin=and_(CAVoteSummary.bill_id == bill_id,
+ primaryjoin=and_(CAVoteSummary.bill_id == foreign(bill_id),
CAVoteSummary.location_code == location_code,
CAVoteSummary.vote_date_time == vote_date_time,
CAVoteSummary.vote_date_seq == vote_date_seq,
| {"golden_diff": "diff --git a/openstates/ca/models.py b/openstates/ca/models.py\n--- a/openstates/ca/models.py\n+++ b/openstates/ca/models.py\n@@ -1,7 +1,7 @@\n from sqlalchemy import (Column, Integer, String, ForeignKey,\n DateTime, Numeric, UnicodeText)\n from sqlalchemy.sql import and_\n-from sqlalchemy.orm import backref, relation\n+from sqlalchemy.orm import backref, relation, foreign\n from sqlalchemy.ext.declarative import declarative_base\n \n from lxml import etree\n@@ -225,8 +225,7 @@\n class CAVoteDetail(Base):\n __tablename__ = \"bill_detail_vote_tbl\"\n \n- bill_id = Column(String(20), ForeignKey(CABill.bill_id),\n- ForeignKey(CAVoteSummary.bill_id), primary_key=True)\n+ bill_id = Column(String(20), ForeignKey(CABill.bill_id), primary_key=True)\n location_code = Column(String(6), ForeignKey(CAVoteSummary.location_code),\n primary_key=True)\n legislator_name = Column(String(50), primary_key=True)\n@@ -240,10 +239,12 @@\n trans_uid = Column(String(30), primary_key=True)\n trans_update = Column(DateTime, primary_key=True)\n \n- bill = relation(CABill, backref=backref('detail_votes'))\n+ bill = relation(CABill,\n+ primaryjoin=\"CABill.bill_id == foreign(CAVoteDetail.bill_id)\",\n+ backref=backref('detail_votes'))\n summary = relation(\n CAVoteSummary,\n- primaryjoin=and_(CAVoteSummary.bill_id == bill_id,\n+ primaryjoin=and_(CAVoteSummary.bill_id == foreign(bill_id),\n CAVoteSummary.location_code == location_code,\n CAVoteSummary.vote_date_time == vote_date_time,\n CAVoteSummary.vote_date_seq == vote_date_seq,\n", "issue": "CA not running with new image\nit seems like CA isn't running right now w/ the switch to Alpine, need to investigate\n", "before_files": [{"content": "from sqlalchemy import (Column, Integer, String, ForeignKey,\n DateTime, Numeric, UnicodeText)\nfrom sqlalchemy.sql import and_\nfrom sqlalchemy.orm import backref, relation\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom lxml import etree\n\nBase = declarative_base()\n\n\nclass CABill(Base):\n __tablename__ = \"bill_tbl\"\n\n bill_id = Column(String(20), primary_key=True)\n session_year = Column(String(8))\n session_num = Column(String(2))\n measure_type = Column(String(4))\n measure_num = Column(Integer)\n measure_state = Column(String(40))\n chapter_year = Column(String(4))\n chapter_type = Column(String(10))\n chapter_session_num = Column(String(2))\n chapter_num = Column(String(10))\n latest_bill_version_id = Column(String(30))\n active_flg = Column(String(1))\n trans_uid = Column(String(30))\n trans_update = Column(DateTime)\n current_location = Column(String(200))\n current_secondary_loc = Column(String(60))\n current_house = Column(String(60))\n current_status = Column(String(60))\n\n actions = relation('CABillAction', backref=backref('bill'),\n order_by=\"CABillAction.bill_history_id\")\n\n versions = relation('CABillVersion', backref=backref('bill'),\n order_by='desc(CABillVersion.version_num)')\n\n votes = relation('CAVoteSummary', backref=backref('bill'),\n order_by='CAVoteSummary.vote_date_time')\n\n @property\n def short_bill_id(self):\n return \"%s%d\" % (self.measure_type, self.measure_num)\n\n\nclass CABillVersion(Base):\n __tablename__ = \"bill_version_tbl\"\n\n bill_version_id = Column(String(30), primary_key=True)\n bill_id = Column(String(19), ForeignKey(CABill.bill_id))\n version_num = Column(Integer)\n bill_version_action_date = Column(DateTime)\n bill_version_action = Column(String(100))\n request_num = Column(String(10))\n subject = Column(String(1000))\n vote_required = 
Column(String(100))\n appropriation = Column(String(3))\n fiscal_committee = Column(String(3))\n local_program = Column(String(3))\n substantive_changes = Column(String(3))\n urgency = Column(String(3))\n taxlevy = Column(String(3))\n bill_xml = Column(UnicodeText)\n active_flg = Column(String(1))\n trans_uid = Column(String(30))\n trans_update = Column(DateTime)\n\n @property\n def xml(self):\n if '_xml' not in self.__dict__:\n self._xml = etree.fromstring(self.bill_xml.encode('utf-8'),\n etree.XMLParser(recover=True))\n return self._xml\n\n @property\n def title(self):\n text = self.xml.xpath(\"string(//*[local-name() = 'Title'])\") or ''\n return text.strip()\n\n @property\n def short_title(self):\n text = self.xml.xpath(\"string(//*[local-name() = 'Subject'])\") or ''\n return text.strip()\n\n\nclass CABillVersionAuthor(Base):\n __tablename__ = \"bill_version_authors_tbl\"\n\n # Note: the primary_keys here are a lie - the actual table has no pk\n # but SQLAlchemy seems to demand one. Furthermore, I get strange\n # exceptions when trying to use bill_version_id as part of a\n # composite primary key.\n\n bill_version_id = Column(String(30),\n ForeignKey(CABillVersion.bill_version_id))\n type = Column(String(15))\n house = Column(String(100))\n name = Column(String(100), primary_key=True)\n contribution = Column(String(100))\n committee_members = Column(String(2000))\n active_flg = Column(String(1))\n trans_uid = Column(String(30))\n trans_update = Column(DateTime, primary_key=True)\n primary_author_flg = Column(String(1))\n\n version = relation(CABillVersion, backref=backref('authors'))\n\n\nclass CABillAction(Base):\n __tablename__ = \"bill_history_tbl\"\n\n bill_id = Column(String(20), ForeignKey(CABill.bill_id))\n bill_history_id = Column(Numeric, primary_key=True)\n action_date = Column(DateTime)\n action = Column(String(2000))\n trans_uid = Column(String(20))\n trans_update_dt = Column(DateTime)\n action_sequence = Column(Integer)\n action_code = Column(String(5))\n action_status = Column(String(60))\n primary_location = Column(String(60))\n secondary_location = Column(String(60))\n ternary_location = Column(String(60))\n end_status = Column(String(60))\n\n @property\n def actor(self):\n # TODO: replace committee codes w/ names\n\n if not self.primary_location:\n return None\n\n actor = self.primary_location\n\n if self.secondary_location:\n actor += \" (%s\" % self.secondary_location\n\n if self.ternary_location:\n actor += \" %s\" % self.ternary_location\n\n actor += \")\"\n\n return actor\n\n\nclass CALegislator(Base):\n __tablename__ = 'legislator_tbl'\n\n district = Column(String(5), primary_key=True)\n session_year = Column(String(8), primary_key=True)\n legislator_name = Column(String(30), primary_key=True)\n house_type = Column(String(1), primary_key=True)\n author_name = Column(String(200))\n first_name = Column(String(30))\n last_name = Column(String(30))\n middle_initial = Column(String(1))\n name_suffix = Column(String(12))\n name_title = Column(String(34))\n web_name_title = Column(String(34))\n party = Column(String(4))\n active_flg = Column(String(1))\n trans_uid = Column(String(30))\n trans_update = Column(DateTime)\n\n\nclass CAMotion(Base):\n __tablename__ = \"bill_motion_tbl\"\n\n motion_id = Column(Integer, primary_key=True)\n motion_text = Column(String(250))\n trans_uid = Column(String(30))\n trans_update = Column(DateTime)\n\n\nclass CALocation(Base):\n __tablename__ = \"location_code_tbl\"\n\n session_year = Column(String(8), primary_key=True)\n location_code 
= Column(String(6), primary_key=True)\n location_type = Column(String(1), primary_key=True)\n consent_calendar_code = Column(String(2), primary_key=True)\n description = Column(String(60))\n long_description = Column(String(200))\n active_flg = Column(String(1))\n trans_uid = Column(String(30))\n trans_update = Column(DateTime)\n\n\nclass CAVoteSummary(Base):\n __tablename__ = \"bill_summary_vote_tbl\"\n\n bill_id = Column(String(20), ForeignKey(CABill.bill_id), primary_key=True)\n location_code = Column(String(6), ForeignKey(CALocation.location_code), primary_key=True)\n vote_date_time = Column(DateTime, primary_key=True)\n vote_date_seq = Column(Integer, primary_key=True)\n motion_id = Column(Integer, ForeignKey(CAMotion.motion_id), primary_key=True)\n ayes = Column(Integer)\n noes = Column(Integer)\n abstain = Column(Integer)\n vote_result = Column(String(6))\n trans_uid = Column(String(30))\n trans_update = Column(DateTime, primary_key=True)\n\n motion = relation(CAMotion)\n location = relation(CALocation)\n\n @property\n def threshold(self):\n # This may not always be true...\n if self.location_code != \"AFLOOR\" and self.location_code != \"SFLOOR\":\n return '1/2'\n\n # Get the associated bill version (probably?)\n version = next(filter(lambda v: v.bill_version_action_date <= self.vote_date_time,\n self.bill.versions))\n\n if version.vote_required == 'Majority':\n return '1/2'\n else:\n return '2/3'\n\n\nclass CAVoteDetail(Base):\n __tablename__ = \"bill_detail_vote_tbl\"\n\n bill_id = Column(String(20), ForeignKey(CABill.bill_id),\n ForeignKey(CAVoteSummary.bill_id), primary_key=True)\n location_code = Column(String(6), ForeignKey(CAVoteSummary.location_code),\n primary_key=True)\n legislator_name = Column(String(50), primary_key=True)\n vote_date_time = Column(DateTime, ForeignKey(CAVoteSummary.vote_date_time),\n primary_key=True)\n vote_date_seq = Column(Integer, ForeignKey(CAVoteSummary.vote_date_seq),\n primary_key=True)\n vote_code = Column(String(5), primary_key=True)\n motion_id = Column(Integer, ForeignKey(CAVoteSummary.motion_id),\n primary_key=True)\n trans_uid = Column(String(30), primary_key=True)\n trans_update = Column(DateTime, primary_key=True)\n\n bill = relation(CABill, backref=backref('detail_votes'))\n summary = relation(\n CAVoteSummary,\n primaryjoin=and_(CAVoteSummary.bill_id == bill_id,\n CAVoteSummary.location_code == location_code,\n CAVoteSummary.vote_date_time == vote_date_time,\n CAVoteSummary.vote_date_seq == vote_date_seq,\n CAVoteSummary.motion_id == motion_id),\n backref=backref('votes'))\n\n\nclass CACommitteeHearing(Base):\n __tablename__ = \"committee_hearing_tbl\"\n\n bill_id = Column(String(20), ForeignKey(CABill.bill_id),\n ForeignKey(CAVoteSummary.bill_id), primary_key=True)\n committee_type = Column(String(2), primary_key=True)\n committee_nr = Column(Integer, primary_key=True)\n hearing_date = Column(DateTime, primary_key=True)\n location_code = Column(String(6), primary_key=True)\n trans_uid = Column(String(30), primary_key=True)\n trans_update_date = Column(DateTime, primary_key=True)\n\n bill = relation(CABill, backref=backref('committee_hearings'))\n", "path": "openstates/ca/models.py"}]} | 3,471 | 408 |
gh_patches_debug_30897 | rasdani/github-patches | git_diff | encode__starlette-186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Session middleware is highly insecure
The session middleware does not have any timestamp component to it, which means that if a session happens to be somehow leaked, it can be reused any time in the future.
Can we have a timestamp component added to the session, so that the session middleware can reject such sessions after a reasonable period of time (e.g. 24 hours)?
</issue>
<code>
[start of starlette/middleware/sessions.py]
1 import functools
2 import json
3 from base64 import b64decode, b64encode
4
5 import itsdangerous
6
7 from starlette.datastructures import MutableHeaders
8 from starlette.requests import Request
9 from starlette.types import ASGIApp, ASGIInstance, Message, Receive, Scope, Send
10
11
12 class SessionMiddleware:
13 def __init__(
14 self, app: ASGIApp, secret_key: str, session_cookie: str = "session"
15 ) -> None:
16 self.app = app
17 self.signer = itsdangerous.Signer(secret_key)
18 self.session_cookie = session_cookie
19
20 def __call__(self, scope: Scope) -> ASGIInstance:
21 if scope["type"] in ("http", "websocket"):
22 request = Request(scope)
23 if self.session_cookie in request.cookies:
24 data = request.cookies[self.session_cookie].encode("utf-8")
25 data = self.signer.unsign(data)
26 scope["session"] = json.loads(b64decode(data))
27 else:
28 scope["session"] = {}
29 return functools.partial(self.asgi, scope=scope)
30 return self.app(scope) # pragma: no cover
31
32 async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:
33 was_empty_session = not scope["session"]
34 inner = self.app(scope)
35
36 async def sender(message: Message) -> None:
37 if message["type"] == "http.response.start":
38 if scope["session"]:
39 # We have session data to persist.
40 data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
41 data = self.signer.sign(data)
42 headers = MutableHeaders(scope=message)
43 header_value = "%s=%s" % (self.session_cookie, data.decode("utf-8"))
44 headers.append("Set-Cookie", header_value)
45 elif not was_empty_session:
46 # The session has been cleared.
47 headers = MutableHeaders(scope=message)
48 header_value = "%s=%s" % (
49 self.session_cookie,
50 "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT",
51 )
52 headers.append("Set-Cookie", header_value)
53 await send(message)
54
55 await inner(receive, sender)
56
[end of starlette/middleware/sessions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py
--- a/starlette/middleware/sessions.py
+++ b/starlette/middleware/sessions.py
@@ -3,6 +3,7 @@
from base64 import b64decode, b64encode
import itsdangerous
+from itsdangerous.exc import BadTimeSignature, SignatureExpired
from starlette.datastructures import MutableHeaders
from starlette.requests import Request
@@ -11,19 +12,27 @@
class SessionMiddleware:
def __init__(
- self, app: ASGIApp, secret_key: str, session_cookie: str = "session"
+ self,
+ app: ASGIApp,
+ secret_key: str,
+ session_cookie: str = "session",
+ max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds
) -> None:
self.app = app
- self.signer = itsdangerous.Signer(secret_key)
+ self.signer = itsdangerous.TimestampSigner(secret_key)
self.session_cookie = session_cookie
+ self.max_age = max_age
def __call__(self, scope: Scope) -> ASGIInstance:
if scope["type"] in ("http", "websocket"):
request = Request(scope)
if self.session_cookie in request.cookies:
data = request.cookies[self.session_cookie].encode("utf-8")
- data = self.signer.unsign(data)
- scope["session"] = json.loads(b64decode(data))
+ try:
+ data = self.signer.unsign(data, max_age=self.max_age)
+ scope["session"] = json.loads(b64decode(data))
+ except (BadTimeSignature, SignatureExpired):
+ scope["session"] = {}
else:
scope["session"] = {}
return functools.partial(self.asgi, scope=scope)
| {"golden_diff": "diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py\n--- a/starlette/middleware/sessions.py\n+++ b/starlette/middleware/sessions.py\n@@ -3,6 +3,7 @@\n from base64 import b64decode, b64encode\n \n import itsdangerous\n+from itsdangerous.exc import BadTimeSignature, SignatureExpired\n \n from starlette.datastructures import MutableHeaders\n from starlette.requests import Request\n@@ -11,19 +12,27 @@\n \n class SessionMiddleware:\n def __init__(\n- self, app: ASGIApp, secret_key: str, session_cookie: str = \"session\"\n+ self,\n+ app: ASGIApp,\n+ secret_key: str,\n+ session_cookie: str = \"session\",\n+ max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds\n ) -> None:\n self.app = app\n- self.signer = itsdangerous.Signer(secret_key)\n+ self.signer = itsdangerous.TimestampSigner(secret_key)\n self.session_cookie = session_cookie\n+ self.max_age = max_age\n \n def __call__(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] in (\"http\", \"websocket\"):\n request = Request(scope)\n if self.session_cookie in request.cookies:\n data = request.cookies[self.session_cookie].encode(\"utf-8\")\n- data = self.signer.unsign(data)\n- scope[\"session\"] = json.loads(b64decode(data))\n+ try:\n+ data = self.signer.unsign(data, max_age=self.max_age)\n+ scope[\"session\"] = json.loads(b64decode(data))\n+ except (BadTimeSignature, SignatureExpired):\n+ scope[\"session\"] = {}\n else:\n scope[\"session\"] = {}\n return functools.partial(self.asgi, scope=scope)\n", "issue": "Session middleware is highly insecure\nThe session middleware does not have any timestamp component to it, which means that if a session happens to be somehow leaked, it can be reused any time in the future.\r\n\r\nCan we have a timestamp component added to the session, so that the session middleware can reject such sessions after a reasonable period of time (e.g. 
24 hours)?\n", "before_files": [{"content": "import functools\nimport json\nfrom base64 import b64decode, b64encode\n\nimport itsdangerous\n\nfrom starlette.datastructures import MutableHeaders\nfrom starlette.requests import Request\nfrom starlette.types import ASGIApp, ASGIInstance, Message, Receive, Scope, Send\n\n\nclass SessionMiddleware:\n def __init__(\n self, app: ASGIApp, secret_key: str, session_cookie: str = \"session\"\n ) -> None:\n self.app = app\n self.signer = itsdangerous.Signer(secret_key)\n self.session_cookie = session_cookie\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] in (\"http\", \"websocket\"):\n request = Request(scope)\n if self.session_cookie in request.cookies:\n data = request.cookies[self.session_cookie].encode(\"utf-8\")\n data = self.signer.unsign(data)\n scope[\"session\"] = json.loads(b64decode(data))\n else:\n scope[\"session\"] = {}\n return functools.partial(self.asgi, scope=scope)\n return self.app(scope) # pragma: no cover\n\n async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:\n was_empty_session = not scope[\"session\"]\n inner = self.app(scope)\n\n async def sender(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s\" % (self.session_cookie, data.decode(\"utf-8\"))\n headers.append(\"Set-Cookie\", header_value)\n elif not was_empty_session:\n # The session has been cleared.\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s\" % (\n self.session_cookie,\n \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\",\n )\n headers.append(\"Set-Cookie\", header_value)\n await send(message)\n\n await inner(receive, sender)\n", "path": "starlette/middleware/sessions.py"}]} | 1,211 | 428 |
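A note on the starlette entry above: the accepted diff swaps Signer for TimestampSigner, so the whole fix rests on itsdangerous embedding a timestamp when signing and enforcing max_age when unsigning. Below is a minimal, self-contained sketch of that behaviour; the secret key, payload and max_age are illustrative assumptions, not values taken from starlette.

```python
import json
from base64 import b64decode, b64encode

import itsdangerous
from itsdangerous.exc import BadTimeSignature, SignatureExpired

signer = itsdangerous.TimestampSigner("example-secret-key")   # assumed key

# Sign: the current timestamp is embedded next to the payload.
payload = b64encode(json.dumps({"user_id": 42}).encode("utf-8"))
cookie_value = signer.sign(payload)

# Unsign with max_age: anything older than max_age raises SignatureExpired,
# which the patched middleware turns into an empty session.
try:
    data = signer.unsign(cookie_value, max_age=14 * 24 * 60 * 60)
    session = json.loads(b64decode(data))
except (BadTimeSignature, SignatureExpired):
    session = {}

print(session)   # freshly signed, so this prints {'user_id': 42}
```

SignatureExpired fires once the cookie is older than max_age, which is exactly the branch the patched middleware maps to an empty session.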
gh_patches_debug_21051 | rasdani/github-patches | git_diff | geopandas__geopandas-512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG/COMPAT: missing values handling / fillna no longer working with shapely 1.6
See https://github.com/Toblerity/Shapely/issues/510 for more detailed exploration of the problem
Temporarily pinned our CI to older shapely version (but kept one with the latest shapely in the allowed failures section) in https://github.com/geopandas/geopandas/pull/508
</issue>
<code>
[start of geopandas/geoseries.py]
1 from functools import partial
2 import json
3 from warnings import warn
4
5 import numpy as np
6 from pandas import Series, DataFrame
7
8 import pyproj
9 from shapely.geometry import shape, Polygon, Point
10 from shapely.geometry.collection import GeometryCollection
11 from shapely.geometry.base import BaseGeometry
12 from shapely.ops import transform
13
14 from geopandas.plotting import plot_series
15 from geopandas.base import GeoPandasBase, _series_unary_op, _CoordinateIndexer
16
17
18 def _is_empty(x):
19 try:
20 return x.is_empty
21 except:
22 return False
23
24
25 class GeoSeries(GeoPandasBase, Series):
26 """A Series object designed to store shapely geometry objects."""
27 _metadata = ['name', 'crs']
28
29 def __new__(cls, *args, **kwargs):
30 kwargs.pop('crs', None)
31 arr = Series.__new__(cls)
32 if type(arr) is GeoSeries:
33 return arr
34 else:
35 return arr.view(GeoSeries)
36
37 def __init__(self, *args, **kwargs):
38 # fix problem for scalar geometries passed
39 if len(args) == 1 and isinstance(args[0], BaseGeometry):
40 args = ([args[0]],)
41
42 crs = kwargs.pop('crs', None)
43
44 super(GeoSeries, self).__init__(*args, **kwargs)
45 self.crs = crs
46 self._invalidate_sindex()
47
48 def append(self, *args, **kwargs):
49 return self._wrapped_pandas_method('append', *args, **kwargs)
50
51 @property
52 def geometry(self):
53 return self
54
55 @property
56 def x(self):
57 """Return the x location of point geometries in a GeoSeries"""
58 if (self.geom_type == "Point").all():
59 return _series_unary_op(self, 'x', null_value=np.nan)
60 else:
61 message = "x attribute access only provided for Point geometries"
62 raise ValueError(message)
63
64 @property
65 def y(self):
66 """Return the y location of point geometries in a GeoSeries"""
67 if (self.geom_type == "Point").all():
68 return _series_unary_op(self, 'y', null_value=np.nan)
69 else:
70 message = "y attribute access only provided for Point geometries"
71 raise ValueError(message)
72
73 @classmethod
74 def from_file(cls, filename, **kwargs):
75 """
76 Alternate constructor to create a GeoSeries from a file
77
78 Parameters
79 ----------
80
81 filename : str
82 File path or file handle to read from. Depending on which kwargs
83 are included, the content of filename may vary, see:
84 http://toblerity.github.io/fiona/README.html#usage
85 for usage details.
86 kwargs : key-word arguments
87 These arguments are passed to fiona.open, and can be used to
88 access multi-layer data, data stored within archives (zip files),
89 etc.
90
91 """
92 import fiona
93 geoms = []
94 with fiona.open(filename, **kwargs) as f:
95 crs = f.crs
96 for rec in f:
97 geoms.append(shape(rec['geometry']))
98 g = GeoSeries(geoms)
99 g.crs = crs
100 return g
101
102 @property
103 def __geo_interface__(self):
104 """Returns a GeoSeries as a python feature collection
105 """
106 from geopandas import GeoDataFrame
107 return GeoDataFrame({'geometry': self}).__geo_interface__
108
109 def to_file(self, filename, driver="ESRI Shapefile", **kwargs):
110 from geopandas import GeoDataFrame
111 data = GeoDataFrame({"geometry": self,
112 "id":self.index.values},
113 index=self.index)
114 data.crs = self.crs
115 data.to_file(filename, driver, **kwargs)
116
117 #
118 # Implement pandas methods
119 #
120
121 @property
122 def _constructor(self):
123 return GeoSeries
124
125 def _wrapped_pandas_method(self, mtd, *args, **kwargs):
126 """Wrap a generic pandas method to ensure it returns a GeoSeries"""
127 val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)
128 if type(val) == Series:
129 val.__class__ = GeoSeries
130 val.crs = self.crs
131 val._invalidate_sindex()
132 return val
133
134 def __getitem__(self, key):
135 return self._wrapped_pandas_method('__getitem__', key)
136
137 def sort_index(self, *args, **kwargs):
138 return self._wrapped_pandas_method('sort_index', *args, **kwargs)
139
140 def take(self, *args, **kwargs):
141 return self._wrapped_pandas_method('take', *args, **kwargs)
142
143 def select(self, *args, **kwargs):
144 return self._wrapped_pandas_method('select', *args, **kwargs)
145
146 @property
147 def _can_hold_na(self):
148 return False
149
150 def __finalize__(self, other, method=None, **kwargs):
151 """ propagate metadata from other to self """
152 # NOTE: backported from pandas master (upcoming v0.13)
153 for name in self._metadata:
154 object.__setattr__(self, name, getattr(other, name, None))
155 return self
156
157 def copy(self, order='C'):
158 """
159 Make a copy of this GeoSeries object
160
161 Parameters
162 ----------
163 deep : boolean, default True
164 Make a deep copy, i.e. also copy data
165
166 Returns
167 -------
168 copy : GeoSeries
169 """
170 # FIXME: this will likely be unnecessary in pandas >= 0.13
171 return GeoSeries(self.values.copy(order), index=self.index,
172 name=self.name).__finalize__(self)
173
174 def isnull(self):
175 """Null values in a GeoSeries are represented by empty geometric objects"""
176 non_geo_null = super(GeoSeries, self).isnull()
177 val = self.apply(_is_empty)
178 return np.logical_or(non_geo_null, val)
179
180 def fillna(self, value=None, method=None, inplace=False,
181 **kwargs):
182 """Fill NA/NaN values with a geometry (empty polygon by default).
183
184 "method" is currently not implemented for pandas <= 0.12.
185 """
186 if value is None:
187 value = Point()
188 return super(GeoSeries, self).fillna(value=value, method=method,
189 inplace=inplace, **kwargs)
190
191 def align(self, other, join='outer', level=None, copy=True,
192 fill_value=None, **kwargs):
193 if fill_value is None:
194 fill_value = Point()
195 left, right = super(GeoSeries, self).align(other, join=join,
196 level=level, copy=copy,
197 fill_value=fill_value,
198 **kwargs)
199 if isinstance(other, GeoSeries):
200 return GeoSeries(left), GeoSeries(right)
201 else: # It is probably a Series, let's keep it that way
202 return GeoSeries(left), right
203
204
205 def __contains__(self, other):
206 """Allow tests of the form "geom in s"
207
208 Tests whether a GeoSeries contains a geometry.
209
210 Note: This is not the same as the geometric method "contains".
211 """
212 if isinstance(other, BaseGeometry):
213 return np.any(self.geom_equals(other))
214 else:
215 return False
216
217 def plot(self, *args, **kwargs):
218 return plot_series(self, *args, **kwargs)
219
220 plot.__doc__ = plot_series.__doc__
221
222 #
223 # Additional methods
224 #
225
226 def to_crs(self, crs=None, epsg=None):
227 """Transform geometries to a new coordinate reference system
228
229 This method will transform all points in all objects. It has
230 no notion or projecting entire geometries. All segments
231 joining points are assumed to be lines in the current
232 projection, not geodesics. Objects crossing the dateline (or
233 other projection boundary) will have undesirable behavior.
234
235 `to_crs` passes the `crs` argument to the `Proj` function from the
236 `pyproj` library (with the option `preserve_units=True`). It can
237 therefore accept proj4 projections in any format
238 supported by `Proj`, including dictionaries, or proj4 strings.
239
240 """
241 from fiona.crs import from_epsg
242 if self.crs is None:
243 raise ValueError('Cannot transform naive geometries. '
244 'Please set a crs on the object first.')
245 if crs is None:
246 try:
247 crs = from_epsg(epsg)
248 except TypeError:
249 raise TypeError('Must set either crs or epsg for output.')
250 proj_in = pyproj.Proj(self.crs, preserve_units=True)
251 proj_out = pyproj.Proj(crs, preserve_units=True)
252 project = partial(pyproj.transform, proj_in, proj_out)
253 result = self.apply(lambda geom: transform(project, geom))
254 result.__class__ = GeoSeries
255 result.crs = crs
256 result._invalidate_sindex()
257 return result
258
259 def to_json(self, **kwargs):
260 """
261 Returns a GeoJSON string representation of the GeoSeries.
262
263 Parameters
264 ----------
265 *kwargs* that will be passed to json.dumps().
266 """
267 return json.dumps(self.__geo_interface__, **kwargs)
268
269 #
270 # Implement standard operators for GeoSeries
271 #
272
273 def __xor__(self, other):
274 """Implement ^ operator as for builtin set type"""
275 return self.symmetric_difference(other)
276
277 def __or__(self, other):
278 """Implement | operator as for builtin set type"""
279 return self.union(other)
280
281 def __and__(self, other):
282 """Implement & operator as for builtin set type"""
283 return self.intersection(other)
284
285 def __sub__(self, other):
286 """Implement - operator as for builtin set type"""
287 return self.difference(other)
288
289 GeoSeries._create_indexer('cx', _CoordinateIndexer)
290
[end of geopandas/geoseries.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/geoseries.py b/geopandas/geoseries.py
--- a/geopandas/geoseries.py
+++ b/geopandas/geoseries.py
@@ -184,14 +184,14 @@
"method" is currently not implemented for pandas <= 0.12.
"""
if value is None:
- value = Point()
+ value = BaseGeometry()
return super(GeoSeries, self).fillna(value=value, method=method,
inplace=inplace, **kwargs)
def align(self, other, join='outer', level=None, copy=True,
fill_value=None, **kwargs):
if fill_value is None:
- fill_value = Point()
+ fill_value = BaseGeometry()
left, right = super(GeoSeries, self).align(other, join=join,
level=level, copy=copy,
fill_value=fill_value,
| {"golden_diff": "diff --git a/geopandas/geoseries.py b/geopandas/geoseries.py\n--- a/geopandas/geoseries.py\n+++ b/geopandas/geoseries.py\n@@ -184,14 +184,14 @@\n \"method\" is currently not implemented for pandas <= 0.12.\n \"\"\"\n if value is None:\n- value = Point()\n+ value = BaseGeometry()\n return super(GeoSeries, self).fillna(value=value, method=method,\n inplace=inplace, **kwargs)\n \n def align(self, other, join='outer', level=None, copy=True,\n fill_value=None, **kwargs):\n if fill_value is None:\n- fill_value = Point()\n+ fill_value = BaseGeometry()\n left, right = super(GeoSeries, self).align(other, join=join,\n level=level, copy=copy,\n fill_value=fill_value,\n", "issue": "BUG/COMPAT: missing values handling / fillna no longer working with shapely 1.6\nSee https://github.com/Toblerity/Shapely/issues/510 for more detailed exploration of the problem\r\n\r\nTemporarily pinned our CI to older shapely version (but kept one with the latest shapely in the allowed failures section) in https://github.com/geopandas/geopandas/pull/508\n", "before_files": [{"content": "from functools import partial\nimport json\nfrom warnings import warn\n\nimport numpy as np\nfrom pandas import Series, DataFrame\n\nimport pyproj\nfrom shapely.geometry import shape, Polygon, Point\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.geometry.base import BaseGeometry\nfrom shapely.ops import transform\n\nfrom geopandas.plotting import plot_series\nfrom geopandas.base import GeoPandasBase, _series_unary_op, _CoordinateIndexer\n\n\ndef _is_empty(x):\n try:\n return x.is_empty\n except:\n return False\n\n\nclass GeoSeries(GeoPandasBase, Series):\n \"\"\"A Series object designed to store shapely geometry objects.\"\"\"\n _metadata = ['name', 'crs']\n\n def __new__(cls, *args, **kwargs):\n kwargs.pop('crs', None)\n arr = Series.__new__(cls)\n if type(arr) is GeoSeries:\n return arr\n else:\n return arr.view(GeoSeries)\n\n def __init__(self, *args, **kwargs):\n # fix problem for scalar geometries passed\n if len(args) == 1 and isinstance(args[0], BaseGeometry):\n args = ([args[0]],)\n\n crs = kwargs.pop('crs', None)\n\n super(GeoSeries, self).__init__(*args, **kwargs)\n self.crs = crs\n self._invalidate_sindex()\n\n def append(self, *args, **kwargs):\n return self._wrapped_pandas_method('append', *args, **kwargs)\n\n @property\n def geometry(self):\n return self\n\n @property\n def x(self):\n \"\"\"Return the x location of point geometries in a GeoSeries\"\"\"\n if (self.geom_type == \"Point\").all():\n return _series_unary_op(self, 'x', null_value=np.nan)\n else:\n message = \"x attribute access only provided for Point geometries\"\n raise ValueError(message)\n\n @property\n def y(self):\n \"\"\"Return the y location of point geometries in a GeoSeries\"\"\"\n if (self.geom_type == \"Point\").all():\n return _series_unary_op(self, 'y', null_value=np.nan)\n else:\n message = \"y attribute access only provided for Point geometries\"\n raise ValueError(message)\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"\n Alternate constructor to create a GeoSeries from a file\n\n Parameters\n ----------\n\n filename : str\n File path or file handle to read from. 
Depending on which kwargs\n are included, the content of filename may vary, see:\n http://toblerity.github.io/fiona/README.html#usage\n for usage details.\n kwargs : key-word arguments\n These arguments are passed to fiona.open, and can be used to\n access multi-layer data, data stored within archives (zip files),\n etc.\n\n \"\"\"\n import fiona\n geoms = []\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n for rec in f:\n geoms.append(shape(rec['geometry']))\n g = GeoSeries(geoms)\n g.crs = crs\n return g\n\n @property\n def __geo_interface__(self):\n \"\"\"Returns a GeoSeries as a python feature collection\n \"\"\"\n from geopandas import GeoDataFrame\n return GeoDataFrame({'geometry': self}).__geo_interface__\n\n def to_file(self, filename, driver=\"ESRI Shapefile\", **kwargs):\n from geopandas import GeoDataFrame\n data = GeoDataFrame({\"geometry\": self,\n \"id\":self.index.values},\n index=self.index)\n data.crs = self.crs\n data.to_file(filename, driver, **kwargs)\n\n #\n # Implement pandas methods\n #\n\n @property\n def _constructor(self):\n return GeoSeries\n\n def _wrapped_pandas_method(self, mtd, *args, **kwargs):\n \"\"\"Wrap a generic pandas method to ensure it returns a GeoSeries\"\"\"\n val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)\n if type(val) == Series:\n val.__class__ = GeoSeries\n val.crs = self.crs\n val._invalidate_sindex()\n return val\n\n def __getitem__(self, key):\n return self._wrapped_pandas_method('__getitem__', key)\n\n def sort_index(self, *args, **kwargs):\n return self._wrapped_pandas_method('sort_index', *args, **kwargs)\n\n def take(self, *args, **kwargs):\n return self._wrapped_pandas_method('take', *args, **kwargs)\n\n def select(self, *args, **kwargs):\n return self._wrapped_pandas_method('select', *args, **kwargs)\n\n @property\n def _can_hold_na(self):\n return False\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\" propagate metadata from other to self \"\"\"\n # NOTE: backported from pandas master (upcoming v0.13)\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def copy(self, order='C'):\n \"\"\"\n Make a copy of this GeoSeries object\n\n Parameters\n ----------\n deep : boolean, default True\n Make a deep copy, i.e. 
also copy data\n\n Returns\n -------\n copy : GeoSeries\n \"\"\"\n # FIXME: this will likely be unnecessary in pandas >= 0.13\n return GeoSeries(self.values.copy(order), index=self.index,\n name=self.name).__finalize__(self)\n\n def isnull(self):\n \"\"\"Null values in a GeoSeries are represented by empty geometric objects\"\"\"\n non_geo_null = super(GeoSeries, self).isnull()\n val = self.apply(_is_empty)\n return np.logical_or(non_geo_null, val)\n\n def fillna(self, value=None, method=None, inplace=False,\n **kwargs):\n \"\"\"Fill NA/NaN values with a geometry (empty polygon by default).\n\n \"method\" is currently not implemented for pandas <= 0.12.\n \"\"\"\n if value is None:\n value = Point()\n return super(GeoSeries, self).fillna(value=value, method=method,\n inplace=inplace, **kwargs)\n\n def align(self, other, join='outer', level=None, copy=True,\n fill_value=None, **kwargs):\n if fill_value is None:\n fill_value = Point()\n left, right = super(GeoSeries, self).align(other, join=join,\n level=level, copy=copy,\n fill_value=fill_value,\n **kwargs)\n if isinstance(other, GeoSeries):\n return GeoSeries(left), GeoSeries(right)\n else: # It is probably a Series, let's keep it that way\n return GeoSeries(left), right\n\n\n def __contains__(self, other):\n \"\"\"Allow tests of the form \"geom in s\"\n\n Tests whether a GeoSeries contains a geometry.\n\n Note: This is not the same as the geometric method \"contains\".\n \"\"\"\n if isinstance(other, BaseGeometry):\n return np.any(self.geom_equals(other))\n else:\n return False\n\n def plot(self, *args, **kwargs):\n return plot_series(self, *args, **kwargs)\n\n plot.__doc__ = plot_series.__doc__\n\n #\n # Additional methods\n #\n\n def to_crs(self, crs=None, epsg=None):\n \"\"\"Transform geometries to a new coordinate reference system\n\n This method will transform all points in all objects. It has\n no notion or projecting entire geometries. All segments\n joining points are assumed to be lines in the current\n projection, not geodesics. Objects crossing the dateline (or\n other projection boundary) will have undesirable behavior.\n\n `to_crs` passes the `crs` argument to the `Proj` function from the\n `pyproj` library (with the option `preserve_units=True`). It can\n therefore accept proj4 projections in any format\n supported by `Proj`, including dictionaries, or proj4 strings.\n\n \"\"\"\n from fiona.crs import from_epsg\n if self.crs is None:\n raise ValueError('Cannot transform naive geometries. 
'\n 'Please set a crs on the object first.')\n if crs is None:\n try:\n crs = from_epsg(epsg)\n except TypeError:\n raise TypeError('Must set either crs or epsg for output.')\n proj_in = pyproj.Proj(self.crs, preserve_units=True)\n proj_out = pyproj.Proj(crs, preserve_units=True)\n project = partial(pyproj.transform, proj_in, proj_out)\n result = self.apply(lambda geom: transform(project, geom))\n result.__class__ = GeoSeries\n result.crs = crs\n result._invalidate_sindex()\n return result\n\n def to_json(self, **kwargs):\n \"\"\"\n Returns a GeoJSON string representation of the GeoSeries.\n\n Parameters\n ----------\n *kwargs* that will be passed to json.dumps().\n \"\"\"\n return json.dumps(self.__geo_interface__, **kwargs)\n\n #\n # Implement standard operators for GeoSeries\n #\n\n def __xor__(self, other):\n \"\"\"Implement ^ operator as for builtin set type\"\"\"\n return self.symmetric_difference(other)\n\n def __or__(self, other):\n \"\"\"Implement | operator as for builtin set type\"\"\"\n return self.union(other)\n\n def __and__(self, other):\n \"\"\"Implement & operator as for builtin set type\"\"\"\n return self.intersection(other)\n\n def __sub__(self, other):\n \"\"\"Implement - operator as for builtin set type\"\"\"\n return self.difference(other)\n\nGeoSeries._create_indexer('cx', _CoordinateIndexer)\n", "path": "geopandas/geoseries.py"}]} | 3,566 | 202 |
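A note on the geopandas entry above: the accepted diff only changes the default fill value from Point() to BaseGeometry(). A hedged sketch of what that looks like from user code; the coordinates are invented, and it assumes a geopandas/shapely combination in which a bare BaseGeometry() behaves as an empty geometry, which is what the patch relies on.

```python
from shapely.geometry import Point
from shapely.geometry.base import BaseGeometry

from geopandas import GeoSeries

# A series with one genuinely missing entry.
s = GeoSeries([Point(0, 0), None, Point(1, 1)])

# GeoSeries.isnull() reports missing entries (and empty geometries) as null.
print(s.isnull().tolist())    # expected: [False, True, False]

# With the patch, fillna() without a value falls back to BaseGeometry()
# instead of Point(), avoiding the shapely 1.6 empty-Point breakage.
filled = s.fillna()
print(filled[1].is_empty)     # expected: True
```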
gh_patches_debug_13477 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-547 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error: Cannot resolve keyword 'customer' into field.
All,
Cannot get past the step
`python manage.py djstripe_init_customers`
in the installation.
Running Python 3.6.0, Django 1.11, and the latest version of dj-stripe (1.0.0).
What combination of Django version and dj-stripe version are folks successfully using at the moment? Thanks!
Here is the traceback:
```
Traceback (most recent call last):
File "manage.py", line 22, in <module>
execute_from_command_line(sys.argv)
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py", line 363, in execute_from_command_line
utility.execute()
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py", line 355, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/Users/jdln/temp/dj-stripe/djstripe/management/commands/djstripe_init_customers.py", line 25, in handle
for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py", line 781, in filter
return self._filter_or_exclude(False, *args, **kwargs)
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py", line 799, in _filter_or_exclude
clone.query.add_q(Q(*args, **kwargs))
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1260, in add_q
clause, _ = self._add_q(q_object, self.used_aliases)
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1286, in _add_q
allow_joins=allow_joins, split_subq=split_subq,
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1164, in build_filter
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1044, in solve_lookup_type
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1362, in names_to_path
"Choices are: %s" % (name, ", ".join(available)))
django.core.exceptions.FieldError: Cannot resolve keyword 'customer' into field. Choices are: date_joined, djstripe_customers, email, first_name, groups, id, is_active, is_staff, is_superuser, last_login, last_name, logentry, password, user_permissions, username
```
</issue>
<code>
[start of djstripe/management/commands/djstripe_init_customers.py]
1 # -*- coding: utf-8 -*-
2 """
3 .. module:: djstripe.management.commands.djstripe_init_customers.
4
5 :synopsis: dj-stripe - init_customers command.
6
7 .. moduleauthor:: @kavdev, @pydanny
8
9 """
10 from __future__ import unicode_literals
11
12 from django.core.management.base import BaseCommand
13
14 from ...models import Customer
15 from ...settings import get_subscriber_model
16
17
18 class Command(BaseCommand):
19 """Create customer objects for existing subscribers that don't have one."""
20
21 help = "Create customer objects for existing subscribers that don't have one"
22
23 def handle(self, *args, **options):
24 """Create Customer objects for Subscribers without Customer objects associated."""
25 for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):
26 # use get_or_create in case of race conditions on large subscriber bases
27 Customer.get_or_create(subscriber=subscriber)
28 print("Created subscriber for {0}".format(subscriber.email))
29
[end of djstripe/management/commands/djstripe_init_customers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/djstripe/management/commands/djstripe_init_customers.py b/djstripe/management/commands/djstripe_init_customers.py
--- a/djstripe/management/commands/djstripe_init_customers.py
+++ b/djstripe/management/commands/djstripe_init_customers.py
@@ -22,7 +22,7 @@
def handle(self, *args, **options):
"""Create Customer objects for Subscribers without Customer objects associated."""
- for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):
+ for subscriber in get_subscriber_model().objects.filter(djstripe_customers=None):
# use get_or_create in case of race conditions on large subscriber bases
Customer.get_or_create(subscriber=subscriber)
print("Created subscriber for {0}".format(subscriber.email))
| {"golden_diff": "diff --git a/djstripe/management/commands/djstripe_init_customers.py b/djstripe/management/commands/djstripe_init_customers.py\n--- a/djstripe/management/commands/djstripe_init_customers.py\n+++ b/djstripe/management/commands/djstripe_init_customers.py\n@@ -22,7 +22,7 @@\n \n def handle(self, *args, **options):\n \"\"\"Create Customer objects for Subscribers without Customer objects associated.\"\"\"\n- for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):\n+ for subscriber in get_subscriber_model().objects.filter(djstripe_customers=None):\n # use get_or_create in case of race conditions on large subscriber bases\n Customer.get_or_create(subscriber=subscriber)\n print(\"Created subscriber for {0}\".format(subscriber.email))\n", "issue": "Error: Cannot resolve keyword 'customer' into field. \nAll,\r\n\r\nCannot get past the step\r\n\r\n`python manage.py djstripe_init_customers`\r\n\r\nin the installation. \r\n\r\nRunning Python 3.6.0, Django 1.11, and the latest version of dj-stripe (1.0.0).\r\n\r\nWhat combination of Django version and dj-stripe version are folks successfully using at the moment? Thanks! \r\n\r\nHere is the traceback:\r\n```\r\nTraceback (most recent call last):\r\n File \"manage.py\", line 22, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py\", line 363, in execute_from_command_line\r\n utility.execute()\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py\", line 355, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py\", line 283, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py\", line 330, in execute\r\n output = self.handle(*args, **options)\r\n File \"/Users/jdln/temp/dj-stripe/djstripe/management/commands/djstripe_init_customers.py\", line 25, in handle\r\n for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/manager.py\", line 85, in manager_method\r\n return getattr(self.get_queryset(), name)(*args, **kwargs)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py\", line 781, in filter\r\n return self._filter_or_exclude(False, *args, **kwargs)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py\", line 799, in _filter_or_exclude\r\n clone.query.add_q(Q(*args, **kwargs))\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1260, in add_q\r\n clause, _ = self._add_q(q_object, self.used_aliases)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1286, in _add_q\r\n allow_joins=allow_joins, split_subq=split_subq,\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1164, in build_filter\r\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1044, in solve_lookup_type\r\n _, field, _, lookup_parts = 
self.names_to_path(lookup_splitted, self.get_meta())\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1362, in names_to_path\r\n \"Choices are: %s\" % (name, \", \".join(available)))\r\ndjango.core.exceptions.FieldError: Cannot resolve keyword 'customer' into field. Choices are: date_joined, djstripe_customers, email, first_name, groups, id, is_active, is_staff, is_superuser, last_login, last_name, logentry, password, user_permissions, username\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: djstripe.management.commands.djstripe_init_customers.\n\n :synopsis: dj-stripe - init_customers command.\n\n.. moduleauthor:: @kavdev, @pydanny\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.core.management.base import BaseCommand\n\nfrom ...models import Customer\nfrom ...settings import get_subscriber_model\n\n\nclass Command(BaseCommand):\n \"\"\"Create customer objects for existing subscribers that don't have one.\"\"\"\n\n help = \"Create customer objects for existing subscribers that don't have one\"\n\n def handle(self, *args, **options):\n \"\"\"Create Customer objects for Subscribers without Customer objects associated.\"\"\"\n for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):\n # use get_or_create in case of race conditions on large subscriber bases\n Customer.get_or_create(subscriber=subscriber)\n print(\"Created subscriber for {0}\".format(subscriber.email))\n", "path": "djstripe/management/commands/djstripe_init_customers.py"}]} | 1,699 | 182 |
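A note on the dj-stripe entry above: the FieldError already lists djstripe_customers among the valid lookups, i.e. the reverse name of the Customer-to-subscriber foreign key, so the query has to filter through that name rather than customer__isnull. A simplified sketch of the failing and working lookups; these models are illustrative stand-ins, not dj-stripe's real ones, and the snippet is meant to sit in a Django app's models.py.

```python
from django.db import models


class Subscriber(models.Model):
    """Stand-in for the configured subscriber / AUTH_USER_MODEL."""
    email = models.EmailField()


class Customer(models.Model):
    # Assumed to mirror dj-stripe: the subscriber FK exposes the reverse
    # accessor "djstripe_customers" (the name shown in the FieldError).
    subscriber = models.ForeignKey(
        Subscriber,
        null=True,
        on_delete=models.CASCADE,
        related_name="djstripe_customers",
    )


# Failing lookup from the traceback: "customer" is neither a field nor a
# reverse name on the subscriber model.
#   Subscriber.objects.filter(customer__isnull=True)


def subscribers_without_customer():
    # Working equivalent: follow the reverse relation by its related_name
    # and ask for subscribers with no related Customer row.
    return Subscriber.objects.filter(djstripe_customers=None)
```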
gh_patches_debug_12626 | rasdani/github-patches | git_diff | cowrie__cowrie-415 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
^M for carriage return in telnet
Trying to log in via telnet to cowrie results in the following:
$telnet xx.yy.zz.aa
Trying xx.yy.zz.aa
Connected to xx.yy.zz.aa.
Escape character is '^]'.
login: icantpressenter^M^M^M^M^M^M^M^M^M^M^M^]
telnet> q
Connection closed.
It looks like some kind of control character thing. Telnet on Windows "kind of works", and telnet on OS X and Linux (Debian 8.6) produces the above ^M issue.
Oddly, specifying the destination port in the telnet client (e.g. "telnet host 23") removes this issue, probably because control messages aren't sent.
</issue>
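(The accepted patch further down resolves this by refusing the SGA and LINEMODE options during negotiation, which is what a full telnet client attempts when no explicit port is given. A stripped-down sketch of those two hooks on a twisted.conch.telnet protocol follows; it is a stand-in for clarity, not the cowrie class itself.)

```python
from twisted.conch.telnet import ECHO, LINEMODE, NAWS, SGA, TelnetProtocol


class NoLinemodeTelnet(TelnetProtocol):
    """Refuse LINEMODE/SGA so a full telnet client falls back to plain mode."""

    def enableLocal(self, opt):
        if opt == ECHO:
            return True
        if opt == SGA:
            return False      # refusing SGA keeps clients line-based
        return False

    def enableRemote(self, opt):
        if opt == LINEMODE:
            return False      # no LINEMODE/TRAPSIG negotiation at all
        if opt == NAWS:
            return True       # still accept window-size reports
        return False
```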
<code>
[start of cowrie/telnet/transport.py]
1 # Copyright (C) 2015, 2016 GoSecure Inc.
2 """
3 Telnet Transport and Authentication for the Honeypot
4
5 @author: Olivier Bilodeau <[email protected]>
6 """
7
8 import struct
9 import time
10 import uuid
11 import inspect
12 import random
13
14 from twisted.python import log
15 from twisted.internet import protocol
16 from twisted.conch.telnet import AuthenticatingTelnetProtocol, ECHO, TRAPSIG, \
17 ITelnetProtocol, ProtocolTransportMixin, \
18 SGA, NAWS, MODE, LINEMODE, TelnetTransport, AlreadyNegotiating
19 from twisted.protocols.policies import TimeoutMixin
20
21 from cowrie.core.credentials import UsernamePasswordIP
22
23 class HoneyPotTelnetFactory(protocol.ServerFactory):
24 """
25 This factory creates HoneyPotTelnetAuthProtocol instances
26 They listen directly to the TCP port
27 """
28 tac = None # gets set later
29
30 def __init__(self, cfg):
31 self.cfg = cfg
32
33
34 # TODO logging clarity can be improved: see what SSH does
35 def logDispatch(self, *msg, **args):
36 """
37 Special delivery to the loggers to avoid scope problems
38 """
39 args['sessionno'] = 'T'+str(args['sessionno'])
40 for dblog in self.tac.dbloggers:
41 dblog.logDispatch(*msg, **args)
42 for output in self.tac.output_plugins:
43 output.logDispatch(*msg, **args)
44
45
46 def startFactory(self):
47 """
48 """
49 try:
50 honeyfs = self.portal.realm.cfg.get('honeypot', 'contents_path')
51 issuefile = honeyfs + "/etc/issue.net"
52 self.banner = open(issuefile).read()
53 except IOError:
54 self.banner = ""
55
56 # For use by the uptime command
57 self.starttime = time.time()
58
59 # hook protocol
60 self.protocol = lambda: CowrieTelnetTransport(HoneyPotTelnetAuthProtocol,
61 self.portal)
62 protocol.ServerFactory.startFactory(self)
63 log.msg("Ready to accept Telnet connections")
64
65
66 def stopFactory(self):
67 """
68 Stop output plugins
69 """
70 protocol.ServerFactory.stopFactory(self)
71
72
73 class HoneyPotTelnetAuthProtocol(AuthenticatingTelnetProtocol):
74 """
75 TelnetAuthProtocol that takes care of Authentication. Once authenticated this
76 protocol is replaced with HoneyPotTelnetSession.
77 """
78
79 loginPrompt = 'login: '
80 passwordPrompt = 'Password: '
81 windowSize = [40, 80]
82
83 def connectionMade(self):
84 """
85 """
86 self.transport.negotiationMap[NAWS] = self.telnet_NAWS
87 # Initial option negotation. Want something at least for Mirai
88 for opt in (NAWS,):
89 self.transport.doChain(opt).addErrback(log.err)
90
91 # I need to doubly escape here since my underlying
92 # CowrieTelnetTransport hack would remove it and leave just \n
93 self.transport.write(self.factory.banner.replace('\n', '\r\r\n'))
94 self.transport.write(self.loginPrompt)
95
96
97 def connectionLost(self, reason):
98 """
99 Fires on pre-authentication disconnects
100 """
101 AuthenticatingTelnetProtocol.connectionLost(self, reason)
102
103
104 def telnet_User(self, line):
105 """
106 Overridden to conditionally kill 'WILL ECHO' which confuses clients
107 that don't implement a proper Telnet protocol (most malware)
108 """
109 self.username = line
110 # only send ECHO option if we are chatting with a real Telnet client
111 #if self.transport.options: <-- doesn't work
112 self.transport.willChain(ECHO)
113 # FIXME: this should be configurable or provided via filesystem
114 self.transport.write(self.passwordPrompt)
115 return 'Password'
116
117
118 def telnet_Password(self, line):
119 username, password = self.username, line
120 del self.username
121 def login(ignored):
122 self.src_ip = self.transport.getPeer().host
123 creds = UsernamePasswordIP(username, password, self.src_ip)
124 d = self.portal.login(creds, self.src_ip, ITelnetProtocol)
125 d.addCallback(self._cbLogin)
126 d.addErrback(self._ebLogin)
127
128 # are we dealing with a real Telnet client?
129 if self.transport.options:
130 # stop ECHO
131 # even if ECHO negotiation fails we still want to attempt a login
132 # this allows us to support dumb clients which is common in malware
133 # thus the addBoth: on success and on exception (AlreadyNegotiating)
134 self.transport.wontChain(ECHO).addBoth(login)
135 else:
136 # process login
137 login('')
138
139 return 'Discard'
140
141 def telnet_Command(self, command):
142 self.transport.protocol.dataReceived(command+'\r')
143 return "Command"
144
145 def _cbLogin(self, ial):
146 """
147 Fired on a successful login
148 """
149 interface, protocol, logout = ial
150 protocol.windowSize = self.windowSize
151 self.protocol = protocol
152 self.logout = logout
153 self.state = 'Command'
154
155 # Remove the short timeout of the login prompt. Timeout will be
156 # provided later by the HoneyPotBaseProtocol class.
157 self.transport.setTimeout(None)
158
159 # replace myself with avatar protocol
160 protocol.makeConnection(self.transport)
161 self.transport.protocol = protocol
162
163
164 def _ebLogin(self, failure):
165 # TODO: provide a way to have user configurable strings for wrong password
166 self.transport.wontChain(ECHO)
167 self.transport.write("\nLogin incorrect\n")
168 self.transport.write(self.loginPrompt)
169 self.state = "User"
170
171 # From TelnetBootstrapProtocol in twisted/conch/telnet.py
172 def telnet_NAWS(self, data):
173 if len(data) == 4:
174 width, height = struct.unpack('!HH', b''.join(data))
175 self.windowSize = [height, width]
176 else:
177 log.msg("Wrong number of NAWS bytes")
178
179 def enableLocal(self, opt):
180 if opt == ECHO:
181 return True
182 elif opt == SGA:
183 return True
184 else:
185 return False
186
187
188 def enableRemote(self, opt):
189 if opt == LINEMODE:
190 self.transport.requestNegotiation(LINEMODE, MODE + chr(TRAPSIG))
191 return True
192 elif opt == NAWS:
193 return True
194 elif opt == SGA:
195 return True
196 else:
197 return False
198
199
200
201 class CowrieTelnetTransport(TelnetTransport, TimeoutMixin):
202 """
203 """
204 def connectionMade(self):
205 self.transportId = uuid.uuid4().hex[:8]
206 sessionno = self.transport.sessionno
207 self.startTime = time.time()
208 self.setTimeout(300)
209
210 log.msg(eventid='cowrie.session.connect',
211 format='New connection: %(src_ip)s:%(src_port)s (%(dst_ip)s:%(dst_port)s) [session: T%(sessionno)s]',
212 src_ip=self.transport.getPeer().host, src_port=self.transport.getPeer().port,
213 dst_ip=self.transport.getHost().host, dst_port=self.transport.getHost().port,
214 session=self.transportId, sessionno='T'+str(sessionno))
215 TelnetTransport.connectionMade(self)
216
217
218 def write(self, bytes):
219 """
220 Because of the presence of two ProtocolTransportMixin in the protocol
221 stack once authenticated, I need to override write() and remove a \r
222 otherwise we end up with \r\r\n on the wire.
223
224 It is kind of a hack. I asked for a better solution here:
225 http://stackoverflow.com/questions/35087250/twisted-telnet-server-how-to-avoid-nested-crlf
226 """
227 self.transport.write(bytes.replace('\r\n', '\n'))
228
229
230 def connectionLost(self, reason):
231 """
232 Fires on pre-authentication disconnects
233 """
234 self.setTimeout(None)
235 TelnetTransport.connectionLost(self, reason)
236 duration = time.time() - self.startTime
237 log.msg(eventid='cowrie.session.closed',
238 format='Connection lost after %(duration)d seconds',
239 duration=duration)
240
241 def willChain(self, option):
242 return self._chainNegotiation(None, self.will, option)
243
244 def wontChain(self, option):
245 return self._chainNegotiation(None, self.wont, option)
246
247 def doChain(self, option):
248 return self._chainNegotiation(None, self.do, option)
249
250 def dontChain(self, option):
251 return self._chainNegotiation(None, self.dont, option)
252
253 def _handleNegotiationError(self, f, func, option):
254 if f.type is AlreadyNegotiating:
255 s = self.getOptionState(option)
256 if func in (self.do, self.dont):
257 s.him.onResult.addCallback(self._chainNegotiation, func, option)
258 s.him.onResult.addErrback(self._handleNegotiationError, func, option)
259 if func in (self.will, self.wont):
260 s.us.onResult.addCallback(self._chainNegotiation, func, option)
261 s.us.onResult.addErrback(self._handleNegotiationError, func, option)
262 # We only care about AlreadyNegotiating, everything else can be ignored
263 # Possible other types include OptionRefused, AlreadyDisabled, AlreadyEnabled, ConnectionDone, ConnectionLost
264 elif f.type is AssertionError:
265 log.err('Client tried to illegally refuse to disable an option; ignoring, but undefined behavior may result')
266 # TODO: Is ignoring this violation of the protocol the proper behavior?
267 # Should the connection be terminated instead?
268 # The telnetd package on Ubuntu (netkit-telnet) does all negotiation before sending the login prompt,
269 # but does handle client-initiated negotiation at any time.
270 return None # This Failure has been handled, no need to continue processing errbacks
271
272 def _chainNegotiation(self, res, func, option):
273 return func(option).addErrback(self._handleNegotiationError, func, option)
274
[end of cowrie/telnet/transport.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cowrie/telnet/transport.py b/cowrie/telnet/transport.py
--- a/cowrie/telnet/transport.py
+++ b/cowrie/telnet/transport.py
@@ -180,15 +180,17 @@
if opt == ECHO:
return True
elif opt == SGA:
- return True
+ return False
+ #return True
else:
return False
def enableRemote(self, opt):
if opt == LINEMODE:
- self.transport.requestNegotiation(LINEMODE, MODE + chr(TRAPSIG))
- return True
+ return False
+ #self.transport.requestNegotiation(LINEMODE, MODE + chr(TRAPSIG))
+ #return True
elif opt == NAWS:
return True
elif opt == SGA:
| {"golden_diff": "diff --git a/cowrie/telnet/transport.py b/cowrie/telnet/transport.py\n--- a/cowrie/telnet/transport.py\n+++ b/cowrie/telnet/transport.py\n@@ -180,15 +180,17 @@\n if opt == ECHO:\n return True\n elif opt == SGA:\n- return True\n+ return False\n+ #return True\n else:\n return False\n \n \n def enableRemote(self, opt):\n if opt == LINEMODE:\n- self.transport.requestNegotiation(LINEMODE, MODE + chr(TRAPSIG))\n- return True\n+ return False\n+ #self.transport.requestNegotiation(LINEMODE, MODE + chr(TRAPSIG))\n+ #return True\n elif opt == NAWS:\n return True\n elif opt == SGA:\n", "issue": "^M for carriage return in telnet\nTrying to log in via telnet to cowrie results in the following:\r\n\r\n$telnet xx.yy.zz.aa\r\nTrying xx.yy.zz.aa\r\nConnected to xx.yy.zz.aa.\r\nEscape character is '^]'.\r\nlogin: icantpressenter^M^M^M^M^M^M^M^M^M^M^M^]\r\ntelnet> q\r\nConnection closed.\r\n\r\nIt looks like some kind of control character thing. Telnet on windows \"kind of works\", and telnet on OS X and Linux (Debian 8.6) produce the above ^M issue.\r\n\r\noddly, specifying the destination port in the telnet client (e.g \"telnet host 23\" ) removes this issue. Probably because control messages aren't sent.\n", "before_files": [{"content": "# Copyright (C) 2015, 2016 GoSecure Inc.\n\"\"\"\nTelnet Transport and Authentication for the Honeypot\n\n@author: Olivier Bilodeau <[email protected]>\n\"\"\"\n\nimport struct\nimport time\nimport uuid\nimport inspect\nimport random\n\nfrom twisted.python import log\nfrom twisted.internet import protocol\nfrom twisted.conch.telnet import AuthenticatingTelnetProtocol, ECHO, TRAPSIG, \\\n ITelnetProtocol, ProtocolTransportMixin, \\\n SGA, NAWS, MODE, LINEMODE, TelnetTransport, AlreadyNegotiating\nfrom twisted.protocols.policies import TimeoutMixin\n\nfrom cowrie.core.credentials import UsernamePasswordIP\n\nclass HoneyPotTelnetFactory(protocol.ServerFactory):\n \"\"\"\n This factory creates HoneyPotTelnetAuthProtocol instances\n They listen directly to the TCP port\n \"\"\"\n tac = None # gets set later\n\n def __init__(self, cfg):\n self.cfg = cfg\n\n\n # TODO logging clarity can be improved: see what SSH does\n def logDispatch(self, *msg, **args):\n \"\"\"\n Special delivery to the loggers to avoid scope problems\n \"\"\"\n args['sessionno'] = 'T'+str(args['sessionno'])\n for dblog in self.tac.dbloggers:\n dblog.logDispatch(*msg, **args)\n for output in self.tac.output_plugins:\n output.logDispatch(*msg, **args)\n\n\n def startFactory(self):\n \"\"\"\n \"\"\"\n try:\n honeyfs = self.portal.realm.cfg.get('honeypot', 'contents_path')\n issuefile = honeyfs + \"/etc/issue.net\"\n self.banner = open(issuefile).read()\n except IOError:\n self.banner = \"\"\n\n # For use by the uptime command\n self.starttime = time.time()\n\n # hook protocol\n self.protocol = lambda: CowrieTelnetTransport(HoneyPotTelnetAuthProtocol,\n self.portal)\n protocol.ServerFactory.startFactory(self)\n log.msg(\"Ready to accept Telnet connections\")\n\n\n def stopFactory(self):\n \"\"\"\n Stop output plugins\n \"\"\"\n protocol.ServerFactory.stopFactory(self)\n\n\nclass HoneyPotTelnetAuthProtocol(AuthenticatingTelnetProtocol):\n \"\"\"\n TelnetAuthProtocol that takes care of Authentication. 
Once authenticated this\n protocol is replaced with HoneyPotTelnetSession.\n \"\"\"\n\n loginPrompt = 'login: '\n passwordPrompt = 'Password: '\n windowSize = [40, 80]\n\n def connectionMade(self):\n \"\"\"\n \"\"\"\n self.transport.negotiationMap[NAWS] = self.telnet_NAWS\n # Initial option negotation. Want something at least for Mirai\n for opt in (NAWS,):\n self.transport.doChain(opt).addErrback(log.err)\n\n # I need to doubly escape here since my underlying\n # CowrieTelnetTransport hack would remove it and leave just \\n\n self.transport.write(self.factory.banner.replace('\\n', '\\r\\r\\n'))\n self.transport.write(self.loginPrompt)\n\n\n def connectionLost(self, reason):\n \"\"\"\n Fires on pre-authentication disconnects\n \"\"\"\n AuthenticatingTelnetProtocol.connectionLost(self, reason)\n\n\n def telnet_User(self, line):\n \"\"\"\n Overridden to conditionally kill 'WILL ECHO' which confuses clients\n that don't implement a proper Telnet protocol (most malware)\n \"\"\"\n self.username = line\n # only send ECHO option if we are chatting with a real Telnet client\n #if self.transport.options: <-- doesn't work\n self.transport.willChain(ECHO)\n # FIXME: this should be configurable or provided via filesystem\n self.transport.write(self.passwordPrompt)\n return 'Password'\n\n\n def telnet_Password(self, line):\n username, password = self.username, line\n del self.username\n def login(ignored):\n self.src_ip = self.transport.getPeer().host\n creds = UsernamePasswordIP(username, password, self.src_ip)\n d = self.portal.login(creds, self.src_ip, ITelnetProtocol)\n d.addCallback(self._cbLogin)\n d.addErrback(self._ebLogin)\n\n # are we dealing with a real Telnet client?\n if self.transport.options:\n # stop ECHO\n # even if ECHO negotiation fails we still want to attempt a login\n # this allows us to support dumb clients which is common in malware\n # thus the addBoth: on success and on exception (AlreadyNegotiating)\n self.transport.wontChain(ECHO).addBoth(login)\n else:\n # process login\n login('')\n\n return 'Discard'\n\n def telnet_Command(self, command):\n self.transport.protocol.dataReceived(command+'\\r')\n return \"Command\"\n\n def _cbLogin(self, ial):\n \"\"\"\n Fired on a successful login\n \"\"\"\n interface, protocol, logout = ial\n protocol.windowSize = self.windowSize\n self.protocol = protocol\n self.logout = logout\n self.state = 'Command'\n\n # Remove the short timeout of the login prompt. 
Timeout will be\n # provided later by the HoneyPotBaseProtocol class.\n self.transport.setTimeout(None)\n\n # replace myself with avatar protocol\n protocol.makeConnection(self.transport)\n self.transport.protocol = protocol\n\n\n def _ebLogin(self, failure):\n # TODO: provide a way to have user configurable strings for wrong password\n self.transport.wontChain(ECHO)\n self.transport.write(\"\\nLogin incorrect\\n\")\n self.transport.write(self.loginPrompt)\n self.state = \"User\"\n\n # From TelnetBootstrapProtocol in twisted/conch/telnet.py\n def telnet_NAWS(self, data):\n if len(data) == 4:\n width, height = struct.unpack('!HH', b''.join(data))\n self.windowSize = [height, width]\n else:\n log.msg(\"Wrong number of NAWS bytes\")\n\n def enableLocal(self, opt):\n if opt == ECHO:\n return True\n elif opt == SGA:\n return True\n else:\n return False\n\n\n def enableRemote(self, opt):\n if opt == LINEMODE:\n self.transport.requestNegotiation(LINEMODE, MODE + chr(TRAPSIG))\n return True\n elif opt == NAWS:\n return True\n elif opt == SGA:\n return True\n else:\n return False\n\n\n\nclass CowrieTelnetTransport(TelnetTransport, TimeoutMixin):\n \"\"\"\n \"\"\"\n def connectionMade(self):\n self.transportId = uuid.uuid4().hex[:8]\n sessionno = self.transport.sessionno\n self.startTime = time.time()\n self.setTimeout(300)\n\n log.msg(eventid='cowrie.session.connect',\n format='New connection: %(src_ip)s:%(src_port)s (%(dst_ip)s:%(dst_port)s) [session: T%(sessionno)s]',\n src_ip=self.transport.getPeer().host, src_port=self.transport.getPeer().port,\n dst_ip=self.transport.getHost().host, dst_port=self.transport.getHost().port,\n session=self.transportId, sessionno='T'+str(sessionno))\n TelnetTransport.connectionMade(self)\n\n\n def write(self, bytes):\n \"\"\"\n Because of the presence of two ProtocolTransportMixin in the protocol\n stack once authenticated, I need to override write() and remove a \\r\n otherwise we end up with \\r\\r\\n on the wire.\n\n It is kind of a hack. 
I asked for a better solution here:\n http://stackoverflow.com/questions/35087250/twisted-telnet-server-how-to-avoid-nested-crlf\n \"\"\"\n self.transport.write(bytes.replace('\\r\\n', '\\n'))\n\n\n def connectionLost(self, reason):\n \"\"\"\n Fires on pre-authentication disconnects\n \"\"\"\n self.setTimeout(None)\n TelnetTransport.connectionLost(self, reason)\n duration = time.time() - self.startTime\n log.msg(eventid='cowrie.session.closed',\n format='Connection lost after %(duration)d seconds',\n duration=duration)\n\n def willChain(self, option):\n return self._chainNegotiation(None, self.will, option)\n\n def wontChain(self, option):\n return self._chainNegotiation(None, self.wont, option)\n\n def doChain(self, option):\n return self._chainNegotiation(None, self.do, option)\n\n def dontChain(self, option):\n return self._chainNegotiation(None, self.dont, option)\n\n def _handleNegotiationError(self, f, func, option):\n if f.type is AlreadyNegotiating:\n s = self.getOptionState(option)\n if func in (self.do, self.dont):\n s.him.onResult.addCallback(self._chainNegotiation, func, option)\n s.him.onResult.addErrback(self._handleNegotiationError, func, option)\n if func in (self.will, self.wont):\n s.us.onResult.addCallback(self._chainNegotiation, func, option)\n s.us.onResult.addErrback(self._handleNegotiationError, func, option)\n # We only care about AlreadyNegotiating, everything else can be ignored\n # Possible other types include OptionRefused, AlreadyDisabled, AlreadyEnabled, ConnectionDone, ConnectionLost\n elif f.type is AssertionError:\n log.err('Client tried to illegally refuse to disable an option; ignoring, but undefined behavior may result')\n # TODO: Is ignoring this violation of the protocol the proper behavior?\n # Should the connection be terminated instead?\n # The telnetd package on Ubuntu (netkit-telnet) does all negotiation before sending the login prompt,\n # but does handle client-initiated negotiation at any time.\n return None # This Failure has been handled, no need to continue processing errbacks\n\n def _chainNegotiation(self, res, func, option):\n return func(option).addErrback(self._handleNegotiationError, func, option)\n", "path": "cowrie/telnet/transport.py"}]} | 3,625 | 197 |
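The change in this record's diff makes the honeypot decline the SGA and LINEMODE telnet options instead of accepting or requesting them. Below is an excerpt-style sketch of the two patched handlers; the class is trimmed to just those methods, and the reading that declining these options is what keeps stock macOS/Linux telnet clients in plain line-at-a-time mode (so Enter no longer shows up as a literal `^M`) is an interpretation of the issue, not something the patch itself states.

```python
# Sketch of the patched option handlers in HoneyPotTelnetAuthProtocol
# (cowrie/telnet/transport.py). ECHO, SGA, LINEMODE and NAWS are the telnet
# option codes already imported from twisted.conch.telnet in that module.
from twisted.conch.telnet import ECHO, SGA, LINEMODE, NAWS

class HoneyPotTelnetAuthProtocol:  # trimmed: only the negotiation handlers shown
    def enableLocal(self, opt):
        if opt == ECHO:
            return True       # keep echoing what the client types
        elif opt == SGA:
            return False      # previously True; now declined
        else:
            return False

    def enableRemote(self, opt):
        if opt == LINEMODE:
            return False      # previously requested LINEMODE negotiation
        elif opt == NAWS:
            return True       # window-size updates still accepted
        elif opt == SGA:
            return True
        else:
            return False
```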
gh_patches_debug_48141 | rasdani/github-patches | git_diff | google__flax-270 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`typing._ClassVar` cannot be accessed in the iPython shell – dataclasses package seems to mess up on Python 3.7
### Problem you have encountered:
I just installed flax and tried to import it from the IPython shell, but it raises an `AttributeError`.
```
In [1]: import flax
*snip*
~/.virtualenvs/flax2/lib/python3.7/site-packages/dataclasses.py in _is_classvar(a_type, typing)
548 # This test uses a typing internal class, but it's the best way to
549 # test if this is a ClassVar.
--> 550 return type(a_type) is typing._ClassVar
551
552
AttributeError: module 'typing' has no attribute '_ClassVar'
```
This does not happen in the normal interpreter, where everything goes fine.
### What you expected to happen:
I expected the import to work the same in IPython as in the normal Python shell.
### Logs, error messages, etc:
Full traceback in this gist: https://gist.github.com/bayerj/96f096c7fb09a7c9b758dabdbca32671
### Steps to reproduce:
On Mac OS X with Python 3.7.6 (not Anaconda), with virtualenvwrapper installed.
```
❯❯❯ mkvirtualenv flax2
❯❯❯ pip install jaxlib
*snip*
❯❯❯ pip install flax
*snip*
❯❯❯ ipython
*snip*
In [1]: import flax
```
### Workaround
The problem seems to be in the `dataclasses` package from PyPI, not Python's own one. If I uninstall it...
```
❯❯❯ pip uninstall dataclasses
Found existing installation: dataclasses 0.6
Uninstalling dataclasses-0.6:
Would remove:
/Users/bayerj/.virtualenvs/debug2/lib/python3.7/site-packages/dataclasses-0.6.dist-info/*
/Users/bayerj/.virtualenvs/debug2/lib/python3.7/site-packages/dataclasses.py
Proceed (y/n)? y
Successfully uninstalled dataclasses-0.6
❯❯❯ ipython
/usr/local/lib/python3.7/site-packages/IPython/core/interactiveshell.py:931: UserWarning: Attempting to work in a virtualenv. If you encounter problems, please install IPython inside the virtualenv.
warn("Attempting to work in a virtualenv. If you encounter problems, please "
Python 3.7.6 (default, Dec 30 2019, 19:38:28)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.9.0 -- An enhanced Interactive Python. Type '?' for help.
In [1]: import flax
```
... this goes fine.
</issue>
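The workaround above (uninstalling the PyPI `dataclasses` backport) points at the packaging problem behind the traceback: on Python 3.7 the backport shadows the standard-library module while still reaching for the private `typing._ClassVar`, which no longer exists there. A minimal sketch of the dependency-side fix, restricting the backport to Python 3.6 with a PEP 508 environment marker, is shown below; it mirrors the reference diff further down in this record, and the surrounding package metadata is placeholder only.

```python
# Minimal setup.py sketch: the environment marker keeps the "dataclasses"
# backport off Python 3.7+, where the stdlib module already provides it.
from setuptools import setup, find_packages

setup(
    name="example-package",                      # placeholder metadata
    version="0.0.0",
    packages=find_packages(),
    install_requires=[
        "dataclasses; python_version < '3.7'",   # backport installs on 3.6 only
    ],
)
```

On an environment that is already broken, `pip uninstall dataclasses` remains the quickest stop-gap, as described above.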
<code>
[start of setup.py]
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """setup.py for Flax."""
16
17 import os
18 from setuptools import find_packages
19 from setuptools import setup
20
21 version = "0.1.0"
22
23 here = os.path.abspath(os.path.dirname(__file__))
24 try:
25 README = open(os.path.join(here, "README.md"), encoding='utf-8').read()
26 except IOError:
27 README = ""
28
29 install_requires = [
30 "numpy>=1.12",
31 "jax>=0.1.59",
32 "matplotlib", # only needed for tensorboard export
33 "dataclasses", # will only install on py3.6
34 "msgpack",
35 ]
36
37 tests_require = [
38 "jaxlib",
39 "pytest",
40 "pytest-cov",
41 "pytest-xdist",
42 "tensorflow",
43 "tensorflow_datasets",
44 ]
45
46 setup(
47 name="flax",
48 version=version,
49 description="Flax: A neural network library for JAX designed for flexibility",
50 long_description="\n\n".join([README]),
51 long_description_content_type='text/markdown',
52 classifiers=[
53 "Development Status :: 3 - Alpha",
54 "Intended Audience :: Developers",
55 "Intended Audience :: Science/Research",
56 "License :: OSI Approved :: MIT License",
57 "Programming Language :: Python :: 3.7",
58 "Topic :: Scientific/Engineering :: Artificial Intelligence",
59 ],
60 keywords="",
61 author="Flax team",
62 author_email="[email protected]",
63 url="https://github.com/google/flax",
64 license="Apache",
65 packages=find_packages(),
66 include_package_data=False,
67 zip_safe=False,
68 install_requires=install_requires,
69 extras_require={
70 "testing": tests_require,
71 },
72 )
73
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
"numpy>=1.12",
"jax>=0.1.59",
"matplotlib", # only needed for tensorboard export
- "dataclasses", # will only install on py3.6
+ "dataclasses;python_version<'3.7'", # will only install on py3.6
"msgpack",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n \"numpy>=1.12\",\n \"jax>=0.1.59\",\n \"matplotlib\", # only needed for tensorboard export\n- \"dataclasses\", # will only install on py3.6\n+ \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n ]\n", "issue": "`typing._ClassVar` cannot be accessed in the iPython shell \u2013 dataclasses package seems to mess up on Python 3.7\n### Problem you have encountered:\r\n\r\nI just installed flax and tried to import it from the iPython shell. But it raises an `AttributeError`.\r\n\r\n```\r\nIn [1]: import flax\r\n\r\n*snip*\r\n\r\n~/.virtualenvs/flax2/lib/python3.7/site-packages/dataclasses.py in _is_classvar(a_type, typing)\r\n 548 # This test uses a typing internal class, but it's the best way to\r\n 549 # test if this is a ClassVar.\r\n--> 550 return type(a_type) is typing._ClassVar\r\n 551\r\n 552\r\n\r\nAttributeError: module 'typing' has no attribute '_ClassVar'\r\n```\r\nThis does not happen in the normal interpreter, where everything goes fine. \r\n\r\n### What you expected to happen:\r\n\r\nI expected the import to work the same in iPython and the normal python shell.\r\n\r\n### Logs, error messages, etc:\r\n\r\nFull traceback in this gist: https://gist.github.com/bayerj/96f096c7fb09a7c9b758dabdbca32671\r\n\r\n### Steps to reproduce:\r\n\r\nOn Mac OS X with Python 3.7.6, not anaconda, virtuelenvwrapper installed.\r\n\r\n```\r\n\u276f\u276f\u276f mkvirtualenv flax2\r\n\u276f\u276f\u276f pip install jaxlib\r\n*snip*\r\n\u276f\u276f\u276f pip install flax\r\n*snip*\r\n\u276f\u276f\u276f ipython\r\n\r\n*snip*\r\n\r\nIn [1]: import flax\r\n```\r\n\r\n### Workaround\r\n\r\nThe problem seems to be in the `dataclasses` package\u2013not python's own one\u2013from PyPI. If I uninstall it...\r\n\r\n```\r\n\u276f\u276f\u276f pip uninstall dataclasses\r\nFound existing installation: dataclasses 0.6\r\nUninstalling dataclasses-0.6:\r\n Would remove:\r\n /Users/bayerj/.virtualenvs/debug2/lib/python3.7/site-packages/dataclasses-0.6.dist-info/*\r\n /Users/bayerj/.virtualenvs/debug2/lib/python3.7/site-packages/dataclasses.py\r\nProceed (y/n)? y\r\n Successfully uninstalled dataclasses-0.6\r\n\u276f\u276f\u276f ipython\r\n/usr/local/lib/python3.7/site-packages/IPython/core/interactiveshell.py:931: UserWarning: Attempting to work in a virtualenv. If you encounter problems, please install IPython inside the virtualenv.\r\n warn(\"Attempting to work in a virtualenv. If you encounter problems, please \"\r\nPython 3.7.6 (default, Dec 30 2019, 19:38:28)\r\nType 'copyright', 'credits' or 'license' for more information\r\nIPython 7.9.0 -- An enhanced Interactive Python. Type '?' for help.\r\n\r\nIn [1]: import flax\r\n```\r\n... 
this goes fine.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nversion = \"0.1.0\"\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding='utf-8').read()\nexcept IOError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.1.59\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses\", # will only install on py3.6\n \"msgpack\",\n]\n\ntests_require = [\n \"jaxlib\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"tensorflow\",\n \"tensorflow_datasets\",\n]\n\nsetup(\n name=\"flax\",\n version=version,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n license=\"Apache\",\n packages=find_packages(),\n include_package_data=False,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]} | 1,819 | 111 |
gh_patches_debug_58693 | rasdani/github-patches | git_diff | google-parfait__tensorflow-federated-1334 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keras model in federated_learning_for_image_classification.ipynb throws warning
**Describe the bug**
The Keras Sequential model in [federated_learning_for_image_classification.ipynb](https://github.com/tensorflow/federated/blob/master/docs/tutorials/federated_learning_for_image_classification.ipynb) throws a warning.
The model in the notebook is
```python
def create_keras_model():
return tf.keras.models.Sequential([
tf.keras.layers.Input(shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer='zeros'),
tf.keras.layers.Softmax(),
])
```
Warning thrown:
```python
WARNING:tensorflow:Please add `keras.layers.InputLayer` instead of `keras.Input` to Sequential model. `keras.Input` is intended to be used by Functional model.
<tensorflow.python.keras.engine.sequential.Sequential at 0x7f66178a46d0>
```
Easily fixed using the correct layer type:
```python
def create_keras_model():
return tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer='zeros'),
tf.keras.layers.Softmax(),
])
```
[colab](https://colab.research.google.com/drive/1LFgBiu9xUa-k92IW24fiSX_kVp7lb0SB?usp=sharing) notebook that reproduces the bug.
</issue>
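The warning quoted above comes from mixing the functional-style `tf.keras.Input` into a `Sequential` container. The sketch below shows the `InputLayer` form suggested above (and applied to the example script in the reference diff later in this record) next to a second common way of declaring the input shape; the `create_keras_model_alt` name and the claim that both forms build the same model without the warning are assumptions of this sketch, not part of the issue.

```python
import tensorflow as tf

def create_keras_model():
    # Sequential-friendly input declaration; avoids the
    # "Please add keras.layers.InputLayer instead of keras.Input" warning.
    return tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(784,)),
        tf.keras.layers.Dense(10, kernel_initializer='zeros'),
        tf.keras.layers.Softmax(),
    ])

def create_keras_model_alt():
    # Alternative: let the first weight layer carry the input shape.
    return tf.keras.models.Sequential([
        tf.keras.layers.Dense(10, input_shape=(784,), kernel_initializer='zeros'),
        tf.keras.layers.Softmax(),
    ])
```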
<code>
[start of tensorflow_federated/python/examples/remote_execution/remote_executor_example.py]
1 # Copyright 2018, The TensorFlow Federated Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Example showing how to run a multi-machine simulation.
15
16 In order to run this example, you must have a running instance of the
17 Executor Service, either locally or on Kubernetes.
18
19 The model trains EMNIST for a small number of rounds, but uses a RemoteExecutor
20 to distribute the work to the ExecutorService.
21 """
22
23 import collections
24 import warnings
25
26 from absl import app
27 from absl import flags
28 import grpc
29 import numpy as np
30 import tensorflow as tf
31 import tensorflow_federated as tff
32
33 FLAGS = flags.FLAGS
34
35 flags.DEFINE_string('host', None, 'The host to connect to.')
36 flags.mark_flag_as_required('host')
37 flags.DEFINE_string('port', '8000', 'The port to connect to.')
38 flags.DEFINE_integer('n_clients', 10, 'Number of clients.')
39 flags.DEFINE_integer('n_rounds', 3, 'Number of rounds.')
40
41
42 def preprocess(dataset):
43
44 def element_fn(element):
45 return collections.OrderedDict([
46 ('x', tf.reshape(element['pixels'], [-1])),
47 ('y', tf.reshape(element['label'], [1])),
48 ])
49
50 return dataset.repeat(NUM_EPOCHS).map(element_fn).batch(BATCH_SIZE)
51
52
53 def make_federated_data(client_data, client_ids):
54 return [
55 preprocess(client_data.create_tf_dataset_for_client(x))
56 for x in client_ids
57 ]
58
59
60 NUM_EPOCHS = 10
61 BATCH_SIZE = 20
62
63
64 def make_remote_executor(inferred_cardinalities):
65 """Make remote executor."""
66
67 def create_worker_stack(ex):
68 ex = tff.framework.ThreadDelegatingExecutor(ex)
69 return tff.framework.ReferenceResolvingExecutor(ex)
70
71 client_ex = []
72 num_clients = inferred_cardinalities.get(tff.CLIENTS, None)
73 if num_clients:
74 print('Inferred that there are {} clients'.format(num_clients))
75 else:
76 print('No CLIENTS placement provided')
77
78 for _ in range(num_clients or 0):
79 channel = grpc.insecure_channel('{}:{}'.format(FLAGS.host, FLAGS.port))
80 remote_ex = tff.framework.RemoteExecutor(channel)
81 worker_stack = create_worker_stack(remote_ex)
82 client_ex.append(worker_stack)
83
84 federating_strategy_factory = tff.framework.FederatedResolvingStrategy.factory(
85 {
86 tff.SERVER: create_worker_stack(tff.framework.EagerTFExecutor()),
87 tff.CLIENTS: client_ex,
88 })
89 unplaced_ex = create_worker_stack(tff.framework.EagerTFExecutor())
90 federating_ex = tff.framework.FederatingExecutor(federating_strategy_factory,
91 unplaced_ex)
92 return tff.framework.ReferenceResolvingExecutor(federating_ex)
93
94
95 def main(argv):
96 if len(argv) > 1:
97 raise app.UsageError('Too many command-line arguments.')
98
99 warnings.simplefilter('ignore')
100
101 np.random.seed(0)
102
103 emnist_train, _ = tff.simulation.datasets.emnist.load_data()
104
105 sample_clients = emnist_train.client_ids[0:FLAGS.n_clients]
106
107 federated_train_data = make_federated_data(emnist_train, sample_clients)
108
109 example_dataset = emnist_train.create_tf_dataset_for_client(
110 emnist_train.client_ids[0])
111
112 preprocessed_example_dataset = preprocess(example_dataset)
113 input_spec = preprocessed_example_dataset.element_spec
114
115 def model_fn():
116 model = tf.keras.models.Sequential([
117 tf.keras.layers.Input(shape=(784,)),
118 tf.keras.layers.Dense(10, kernel_initializer='zeros'),
119 tf.keras.layers.Softmax(),
120 ])
121 return tff.learning.from_keras_model(
122 model,
123 input_spec=input_spec,
124 loss=tf.keras.losses.SparseCategoricalCrossentropy(),
125 metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
126
127 iterative_process = tff.learning.build_federated_averaging_process(
128 model_fn,
129 client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02))
130
131 factory = tff.framework.ResourceManagingExecutorFactory(make_remote_executor)
132 context = tff.framework.ExecutionContext(factory)
133 tff.framework.set_default_context(context)
134
135 state = iterative_process.initialize()
136
137 state, metrics = iterative_process.next(state, federated_train_data)
138 print('round 1, metrics={}'.format(metrics))
139
140 for round_num in range(2, FLAGS.n_rounds + 1):
141 state, metrics = iterative_process.next(state, federated_train_data)
142 print('round {:2d}, metrics={}'.format(round_num, metrics))
143
144
145 if __name__ == '__main__':
146 app.run(main)
147
[end of tensorflow_federated/python/examples/remote_execution/remote_executor_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_federated/python/examples/remote_execution/remote_executor_example.py b/tensorflow_federated/python/examples/remote_execution/remote_executor_example.py
--- a/tensorflow_federated/python/examples/remote_execution/remote_executor_example.py
+++ b/tensorflow_federated/python/examples/remote_execution/remote_executor_example.py
@@ -114,7 +114,7 @@
def model_fn():
model = tf.keras.models.Sequential([
- tf.keras.layers.Input(shape=(784,)),
+ tf.keras.layers.InputLayer(input_shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer='zeros'),
tf.keras.layers.Softmax(),
])
| {"golden_diff": "diff --git a/tensorflow_federated/python/examples/remote_execution/remote_executor_example.py b/tensorflow_federated/python/examples/remote_execution/remote_executor_example.py\n--- a/tensorflow_federated/python/examples/remote_execution/remote_executor_example.py\n+++ b/tensorflow_federated/python/examples/remote_execution/remote_executor_example.py\n@@ -114,7 +114,7 @@\n \n def model_fn():\n model = tf.keras.models.Sequential([\n- tf.keras.layers.Input(shape=(784,)),\n+ tf.keras.layers.InputLayer(input_shape=(784,)),\n tf.keras.layers.Dense(10, kernel_initializer='zeros'),\n tf.keras.layers.Softmax(),\n ])\n", "issue": "Keras model in federated_learning_for_image_classification.ipynb throws warning\n**Describe the bug**\r\nKeras Sequential Model in [federated_learning_for_image_classification.ipynb](https://github.com/tensorflow/federated/blob/master/docs/tutorials/federated_learning_for_image_classification.ipynb) throws warning.\r\nThe model in the notebook is\r\n```python\r\ndef create_keras_model():\r\n return tf.keras.models.Sequential([\r\n tf.keras.layers.Input(shape=(784,)),\r\n tf.keras.layers.Dense(10, kernel_initializer='zeros'),\r\n tf.keras.layers.Softmax(),\r\n ])\r\n```\r\nWarning thrown:\r\n```python\r\nWARNING:tensorflow:Please add `keras.layers.InputLayer` instead of `keras.Input` to Sequential model. `keras.Input` is intended to be used by Functional model.\r\n<tensorflow.python.keras.engine.sequential.Sequential at 0x7f66178a46d0>\r\n```\r\n\r\nEasily fixed using the correct layer type:\r\n```python\r\ndef create_keras_model():\r\n return tf.keras.models.Sequential([\r\n tf.keras.layers.InputLayer(input_shape=(784,)),\r\n tf.keras.layers.Dense(10, kernel_initializer='zeros'),\r\n tf.keras.layers.Softmax(),\r\n ])\r\n```\r\n\r\n[colab](https://colab.research.google.com/drive/1LFgBiu9xUa-k92IW24fiSX_kVp7lb0SB?usp=sharing) notebook that reproduces the bug.\r\n\n", "before_files": [{"content": "# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example showing how to run a multi-machine simulation.\n\nIn order to run this example, you must have a running instance of the\nExecutor Service, either locally or on Kubernetes.\n\nThe model trains EMNIST for a small number of rounds, but uses a RemoteExecutor\nto distribute the work to the ExecutorService.\n\"\"\"\n\nimport collections\nimport warnings\n\nfrom absl import app\nfrom absl import flags\nimport grpc\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('host', None, 'The host to connect to.')\nflags.mark_flag_as_required('host')\nflags.DEFINE_string('port', '8000', 'The port to connect to.')\nflags.DEFINE_integer('n_clients', 10, 'Number of clients.')\nflags.DEFINE_integer('n_rounds', 3, 'Number of rounds.')\n\n\ndef preprocess(dataset):\n\n def element_fn(element):\n return collections.OrderedDict([\n ('x', tf.reshape(element['pixels'], [-1])),\n ('y', 
tf.reshape(element['label'], [1])),\n ])\n\n return dataset.repeat(NUM_EPOCHS).map(element_fn).batch(BATCH_SIZE)\n\n\ndef make_federated_data(client_data, client_ids):\n return [\n preprocess(client_data.create_tf_dataset_for_client(x))\n for x in client_ids\n ]\n\n\nNUM_EPOCHS = 10\nBATCH_SIZE = 20\n\n\ndef make_remote_executor(inferred_cardinalities):\n \"\"\"Make remote executor.\"\"\"\n\n def create_worker_stack(ex):\n ex = tff.framework.ThreadDelegatingExecutor(ex)\n return tff.framework.ReferenceResolvingExecutor(ex)\n\n client_ex = []\n num_clients = inferred_cardinalities.get(tff.CLIENTS, None)\n if num_clients:\n print('Inferred that there are {} clients'.format(num_clients))\n else:\n print('No CLIENTS placement provided')\n\n for _ in range(num_clients or 0):\n channel = grpc.insecure_channel('{}:{}'.format(FLAGS.host, FLAGS.port))\n remote_ex = tff.framework.RemoteExecutor(channel)\n worker_stack = create_worker_stack(remote_ex)\n client_ex.append(worker_stack)\n\n federating_strategy_factory = tff.framework.FederatedResolvingStrategy.factory(\n {\n tff.SERVER: create_worker_stack(tff.framework.EagerTFExecutor()),\n tff.CLIENTS: client_ex,\n })\n unplaced_ex = create_worker_stack(tff.framework.EagerTFExecutor())\n federating_ex = tff.framework.FederatingExecutor(federating_strategy_factory,\n unplaced_ex)\n return tff.framework.ReferenceResolvingExecutor(federating_ex)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n warnings.simplefilter('ignore')\n\n np.random.seed(0)\n\n emnist_train, _ = tff.simulation.datasets.emnist.load_data()\n\n sample_clients = emnist_train.client_ids[0:FLAGS.n_clients]\n\n federated_train_data = make_federated_data(emnist_train, sample_clients)\n\n example_dataset = emnist_train.create_tf_dataset_for_client(\n emnist_train.client_ids[0])\n\n preprocessed_example_dataset = preprocess(example_dataset)\n input_spec = preprocessed_example_dataset.element_spec\n\n def model_fn():\n model = tf.keras.models.Sequential([\n tf.keras.layers.Input(shape=(784,)),\n tf.keras.layers.Dense(10, kernel_initializer='zeros'),\n tf.keras.layers.Softmax(),\n ])\n return tff.learning.from_keras_model(\n model,\n input_spec=input_spec,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])\n\n iterative_process = tff.learning.build_federated_averaging_process(\n model_fn,\n client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02))\n\n factory = tff.framework.ResourceManagingExecutorFactory(make_remote_executor)\n context = tff.framework.ExecutionContext(factory)\n tff.framework.set_default_context(context)\n\n state = iterative_process.initialize()\n\n state, metrics = iterative_process.next(state, federated_train_data)\n print('round 1, metrics={}'.format(metrics))\n\n for round_num in range(2, FLAGS.n_rounds + 1):\n state, metrics = iterative_process.next(state, federated_train_data)\n print('round {:2d}, metrics={}'.format(round_num, metrics))\n\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "tensorflow_federated/python/examples/remote_execution/remote_executor_example.py"}]} | 2,325 | 156 |
gh_patches_debug_32843 | rasdani/github-patches | git_diff | nextcloud__appstore-693 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Password reset sends outdated reset token
When requesting a password reset link from apps.nextcloud.com, the token in the link of the reset e-mail does not work, the website reports:
> Bad API Token
>
> The password reset link was invalid, possibly because it has already been used. Please request a new password reset.
When requesting a new password reset, an e-mail with the very same token is sent. In consequence it is impossible to change the password.
## Details
* Firefox 72.0.2
## Steps to reproduce
1. "Forget" your password
2. Request password reset link
3. Receive e-mail and open reset link
4. Observe error, follow suggestion and request new link
5. Receive e-mail with exactly the same token
</issue>
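The symptom above, a reset link rejected as a bad token followed by a fresh e-mail carrying the very same token, is consistent with the key being produced by one generator and checked by another: the site's custom reset form builds keys with Django's `default_token_generator`, while allauth's reset-from-key view validates them with its own `EmailAwarePasswordResetTokenGenerator`. A minimal sketch of the generator swap, the same change made in the reference diff at the end of this record, follows; `reset_keys_for` is an illustrative helper name, and the snippet assumes a configured Django project with django-allauth installed.

```python
# Sketch: build the reset key with allauth's generator so it matches what the
# account_reset_password_from_key view later validates, and only consider
# active users (both parts of the eventual fix).
from allauth.account.forms import EmailAwarePasswordResetTokenGenerator
from allauth.account.utils import filter_users_by_email, user_pk_to_url_str

token_generator = EmailAwarePasswordResetTokenGenerator()

def reset_keys_for(email):
    users = filter_users_by_email(email, is_active=True)
    return [
        (user_pk_to_url_str(user), token_generator.make_token(user))
        for user in users
    ]
```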
<code>
[start of nextcloudappstore/user/forms.py]
1 from allauth.account.utils import filter_users_by_email, user_username, \
2 user_pk_to_url_str
3 from django import forms
4 from django.contrib.auth import get_user_model
5 from django.forms import EmailField, CharField, PasswordInput
6 from django.utils.translation import ugettext_lazy as _
7 from snowpenguin.django.recaptcha2.fields import ReCaptchaField
8 from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
9
10
11 class SignupFormRecaptcha(forms.Form):
12 """integrate a recaptcha field."""
13 recaptcha = ReCaptchaField(widget=ReCaptchaWidget())
14 first_name = CharField(max_length=30, label=_('First name'))
15 last_name = CharField(max_length=30, label=_('Last name'))
16
17 def signup(self, request, user):
18 user.first_name = self.cleaned_data['first_name']
19 user.last_name = self.cleaned_data['last_name']
20 user.save()
21
22
23 class DeleteAccountForm(forms.Form):
24 email = EmailField(required=True, label=_('Your e-mail address'))
25
26 def __init__(self, *args, **kwargs):
27 self.user = kwargs.pop('user', None)
28 super().__init__(*args, **kwargs)
29
30 def clean_email(self):
31 email = self.cleaned_data.get('email')
32 if self.user and self.user.email == email:
33 return email
34 else:
35 raise forms.ValidationError(_(
36 'The given e-mail address does not match your e-mail address'))
37
38
39 class AccountForm(forms.ModelForm):
40 passwd = CharField(widget=PasswordInput(), label=_('Confirm password'),
41 help_text=_('Password is required to prevent '
42 'unauthorized users from changing your '
43 'email address and resetting your '
44 'password. This field does not update your '
45 'password!'))
46
47 class Meta:
48 model = get_user_model()
49 fields = ('first_name', 'last_name', 'email')
50
51 def clean_email(self):
52 value = self.cleaned_data['email']
53 users = filter_users_by_email(value)
54 if [u for u in users if u.pk != self.instance.pk]:
55 msg = _(
56 'This e-mail address is already associated with another '
57 'account.')
58 raise forms.ValidationError(msg)
59 return value
60
61 def clean_passwd(self):
62 value = self.cleaned_data['passwd']
63 if self.instance.check_password(value):
64 return value
65 else:
66 raise forms.ValidationError(_('Invalid password'))
67
68
69 class CustomResetPasswordForm(forms.Form):
70 # remove this class once issue #1307 is resolved django-allauth
71 email = forms.EmailField(
72 label=_("E-mail"),
73 required=True,
74 widget=forms.TextInput(attrs={
75 "type": "email",
76 "size": "30",
77 "placeholder": _("E-mail address"),
78 })
79 )
80
81 def clean_email(self):
82 email = self.cleaned_data["email"]
83 from allauth.account.adapter import get_adapter
84 email = get_adapter().clean_email(email)
85 self.users = filter_users_by_email(email)
86
87 return self.cleaned_data["email"]
88
89 def save(self, request, **kwargs):
90 from django.contrib.sites.shortcuts import get_current_site
91 current_site = get_current_site(request)
92 email = self.cleaned_data["email"]
93 from django.contrib.auth.tokens import default_token_generator
94 token_generator = kwargs.get("token_generator",
95 default_token_generator)
96
97 for user in self.users:
98 temp_key = token_generator.make_token(user)
99
100 # save it to the password reset model
101 # password_reset = PasswordReset(user=user, temp_key=temp_key)
102 # password_reset.save()
103
104 # send the password reset email
105 from django.urls import reverse
106 path = reverse("account_reset_password_from_key",
107 kwargs=dict(uidb36=user_pk_to_url_str(user),
108 key=temp_key))
109 from allauth.utils import build_absolute_uri
110 url = build_absolute_uri(
111 request, path)
112
113 context = {"current_site": current_site,
114 "user": user,
115 "password_reset_url": url,
116 "request": request}
117
118 from allauth.account import app_settings
119
120 if app_settings.AUTHENTICATION_METHOD \
121 != app_settings.AuthenticationMethod.EMAIL:
122 context['username'] = user_username(user)
123 from allauth.account.adapter import get_adapter
124 get_adapter(request).send_mail(
125 'account/email/password_reset_key',
126 email,
127 context)
128 return self.cleaned_data["email"]
129
[end of nextcloudappstore/user/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nextcloudappstore/user/forms.py b/nextcloudappstore/user/forms.py
--- a/nextcloudappstore/user/forms.py
+++ b/nextcloudappstore/user/forms.py
@@ -1,3 +1,4 @@
+from allauth.account.forms import EmailAwarePasswordResetTokenGenerator
from allauth.account.utils import filter_users_by_email, user_username, \
user_pk_to_url_str
from django import forms
@@ -82,7 +83,7 @@
email = self.cleaned_data["email"]
from allauth.account.adapter import get_adapter
email = get_adapter().clean_email(email)
- self.users = filter_users_by_email(email)
+ self.users = filter_users_by_email(email, is_active=True)
return self.cleaned_data["email"]
@@ -90,9 +91,7 @@
from django.contrib.sites.shortcuts import get_current_site
current_site = get_current_site(request)
email = self.cleaned_data["email"]
- from django.contrib.auth.tokens import default_token_generator
- token_generator = kwargs.get("token_generator",
- default_token_generator)
+ token_generator = EmailAwarePasswordResetTokenGenerator()
for user in self.users:
temp_key = token_generator.make_token(user)
@@ -118,7 +117,7 @@
from allauth.account import app_settings
if app_settings.AUTHENTICATION_METHOD \
- != app_settings.AuthenticationMethod.EMAIL:
+ != app_settings.AuthenticationMethod.EMAIL:
context['username'] = user_username(user)
from allauth.account.adapter import get_adapter
get_adapter(request).send_mail(
| {"golden_diff": "diff --git a/nextcloudappstore/user/forms.py b/nextcloudappstore/user/forms.py\n--- a/nextcloudappstore/user/forms.py\n+++ b/nextcloudappstore/user/forms.py\n@@ -1,3 +1,4 @@\n+from allauth.account.forms import EmailAwarePasswordResetTokenGenerator\n from allauth.account.utils import filter_users_by_email, user_username, \\\n user_pk_to_url_str\n from django import forms\n@@ -82,7 +83,7 @@\n email = self.cleaned_data[\"email\"]\n from allauth.account.adapter import get_adapter\n email = get_adapter().clean_email(email)\n- self.users = filter_users_by_email(email)\n+ self.users = filter_users_by_email(email, is_active=True)\n \n return self.cleaned_data[\"email\"]\n \n@@ -90,9 +91,7 @@\n from django.contrib.sites.shortcuts import get_current_site\n current_site = get_current_site(request)\n email = self.cleaned_data[\"email\"]\n- from django.contrib.auth.tokens import default_token_generator\n- token_generator = kwargs.get(\"token_generator\",\n- default_token_generator)\n+ token_generator = EmailAwarePasswordResetTokenGenerator()\n \n for user in self.users:\n temp_key = token_generator.make_token(user)\n@@ -118,7 +117,7 @@\n from allauth.account import app_settings\n \n if app_settings.AUTHENTICATION_METHOD \\\n- != app_settings.AuthenticationMethod.EMAIL:\n+ != app_settings.AuthenticationMethod.EMAIL:\n context['username'] = user_username(user)\n from allauth.account.adapter import get_adapter\n get_adapter(request).send_mail(\n", "issue": "Password reset sends outdated reset token\nWhen requesting a password reset link from apps.nextcloud.com, the token in the link of the reset e-mail does not work, the website reports:\r\n\r\n> Bad API Token\r\n> \r\n> The password reset link was invalid, possibly because it has already been used. Please request a new password reset.\r\n\r\nWhen requesting a new password reset, an e-mail with the very same token is sent. In consequence it is impossible to change the password.\r\n\r\n## Details\r\n\r\n* Firefox 72.0.2\r\n\r\n## Steps to reproduce\r\n\r\n1. \"Forget\" your password\r\n2. Request password reset link\r\n3. Receive e-mail and open reset link\r\n4. Observe error, follow suggestion and request new link\r\n5. 
Receive e-mail with exactly the same token\r\n\n", "before_files": [{"content": "from allauth.account.utils import filter_users_by_email, user_username, \\\n user_pk_to_url_str\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.forms import EmailField, CharField, PasswordInput\nfrom django.utils.translation import ugettext_lazy as _\nfrom snowpenguin.django.recaptcha2.fields import ReCaptchaField\nfrom snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget\n\n\nclass SignupFormRecaptcha(forms.Form):\n \"\"\"integrate a recaptcha field.\"\"\"\n recaptcha = ReCaptchaField(widget=ReCaptchaWidget())\n first_name = CharField(max_length=30, label=_('First name'))\n last_name = CharField(max_length=30, label=_('Last name'))\n\n def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n\nclass DeleteAccountForm(forms.Form):\n email = EmailField(required=True, label=_('Your e-mail address'))\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n if self.user and self.user.email == email:\n return email\n else:\n raise forms.ValidationError(_(\n 'The given e-mail address does not match your e-mail address'))\n\n\nclass AccountForm(forms.ModelForm):\n passwd = CharField(widget=PasswordInput(), label=_('Confirm password'),\n help_text=_('Password is required to prevent '\n 'unauthorized users from changing your '\n 'email address and resetting your '\n 'password. This field does not update your '\n 'password!'))\n\n class Meta:\n model = get_user_model()\n fields = ('first_name', 'last_name', 'email')\n\n def clean_email(self):\n value = self.cleaned_data['email']\n users = filter_users_by_email(value)\n if [u for u in users if u.pk != self.instance.pk]:\n msg = _(\n 'This e-mail address is already associated with another '\n 'account.')\n raise forms.ValidationError(msg)\n return value\n\n def clean_passwd(self):\n value = self.cleaned_data['passwd']\n if self.instance.check_password(value):\n return value\n else:\n raise forms.ValidationError(_('Invalid password'))\n\n\nclass CustomResetPasswordForm(forms.Form):\n # remove this class once issue #1307 is resolved django-allauth\n email = forms.EmailField(\n label=_(\"E-mail\"),\n required=True,\n widget=forms.TextInput(attrs={\n \"type\": \"email\",\n \"size\": \"30\",\n \"placeholder\": _(\"E-mail address\"),\n })\n )\n\n def clean_email(self):\n email = self.cleaned_data[\"email\"]\n from allauth.account.adapter import get_adapter\n email = get_adapter().clean_email(email)\n self.users = filter_users_by_email(email)\n\n return self.cleaned_data[\"email\"]\n\n def save(self, request, **kwargs):\n from django.contrib.sites.shortcuts import get_current_site\n current_site = get_current_site(request)\n email = self.cleaned_data[\"email\"]\n from django.contrib.auth.tokens import default_token_generator\n token_generator = kwargs.get(\"token_generator\",\n default_token_generator)\n\n for user in self.users:\n temp_key = token_generator.make_token(user)\n\n # save it to the password reset model\n # password_reset = PasswordReset(user=user, temp_key=temp_key)\n # password_reset.save()\n\n # send the password reset email\n from django.urls import reverse\n path = reverse(\"account_reset_password_from_key\",\n kwargs=dict(uidb36=user_pk_to_url_str(user),\n key=temp_key))\n from allauth.utils 
import build_absolute_uri\n url = build_absolute_uri(\n request, path)\n\n context = {\"current_site\": current_site,\n \"user\": user,\n \"password_reset_url\": url,\n \"request\": request}\n\n from allauth.account import app_settings\n\n if app_settings.AUTHENTICATION_METHOD \\\n != app_settings.AuthenticationMethod.EMAIL:\n context['username'] = user_username(user)\n from allauth.account.adapter import get_adapter\n get_adapter(request).send_mail(\n 'account/email/password_reset_key',\n email,\n context)\n return self.cleaned_data[\"email\"]\n", "path": "nextcloudappstore/user/forms.py"}]} | 1,924 | 350 |
gh_patches_debug_24199 | rasdani/github-patches | git_diff | LibraryOfCongress__concordia-782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Latest page URL, in nav bar, and redirect
Community managers want to consolidate `/latest` into `/about`. Most of the communication and everyday updates happen in History Hub, so there is less need for a dedicated latest-updates page.
Once the URL is removed, the CMs will move the content from `/latest` to `/about`.
Acceptance criteria:
- [x] Remove URL
- [x] Any link to `/latest` to be redirected to `/about`
- [x] Move content (CMs will do this using the static page editor)
</issue>
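For the redirect item in the acceptance criteria above, Django's class-based `RedirectView` can send the old path to the named `about` route with a permanent redirect while carrying any query string along, which is the shape of the change in the reference diff at the end of this record. A minimal sketch, assuming the existing `about` URL name from `concordia/urls.py` and with the rest of the urlpatterns elided:

```python
from django.urls import path
from django.views.generic import RedirectView

urlpatterns = [
    # ... existing routes, including path("about/", views.simple_page, name="about") ...
    path(
        "latest/",
        RedirectView.as_view(
            pattern_name="about",   # reverse the named "about" route
            permanent=True,         # 301, since /latest is going away for good
            query_string=True,      # keep any ?query=... on the redirect
        ),
    ),
]
```

Because `permanent=True` issues a 301 that browsers cache, it makes sense to enable the redirect only once the content has actually been moved to `/about`.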
<code>
[start of concordia/urls.py]
1 from django.conf import settings
2 from django.conf.urls import url
3 from django.contrib import admin
4 from django.http import Http404, HttpResponseForbidden
5 from django.urls import include, path
6 from django.views.defaults import page_not_found, permission_denied, server_error
7
8 from exporter import views as exporter_views
9
10 from . import views
11
12 tx_urlpatterns = (
13 [
14 path("", views.CampaignListView.as_view(), name="campaign-list"),
15 path(
16 "<slug:slug>/", views.CampaignDetailView.as_view(), name="campaign-detail"
17 ),
18 path(
19 "<slug:campaign_slug>/export/csv/",
20 exporter_views.ExportCampaignToCSV.as_view(),
21 name="campaign-export-csv",
22 ),
23 path(
24 "<slug:campaign_slug>/export/bagit/",
25 exporter_views.ExportCampaignToBagit.as_view(),
26 name="campaign-export-bagit",
27 ),
28 path(
29 "<slug:campaign_slug>/<slug:project_slug>/export/bagit/",
30 exporter_views.ExportProjectToBagIt.as_view(),
31 name="project-export-bagit",
32 ),
33 path(
34 "<slug:campaign_slug>/<slug:project_slug>/<slug:item_id>/export/bagit/",
35 exporter_views.ExportItemToBagIt.as_view(),
36 name="item-export-bagit",
37 ),
38 path(
39 "<slug:campaign_slug>/report/",
40 views.ReportCampaignView.as_view(),
41 name="campaign-report",
42 ),
43 path(
44 "<slug:campaign_slug>/<slug:project_slug>/<slug:item_id>/<slug:slug>/",
45 views.AssetDetailView.as_view(),
46 name="asset-detail",
47 ),
48 # n.b. this must be above project-detail to avoid being seen as a project slug:
49 path(
50 "<slug:campaign_slug>/next-transcribable-asset/",
51 views.redirect_to_next_transcribable_asset,
52 name="redirect-to-next-transcribable-asset",
53 ),
54 path(
55 "<slug:campaign_slug>/<slug:slug>/",
56 views.ProjectDetailView.as_view(),
57 name="project-detail",
58 ),
59 path(
60 "<slug:campaign_slug>/<slug:project_slug>/<slug:item_id>/",
61 views.ItemDetailView.as_view(),
62 name="item-detail",
63 ),
64 ],
65 "transcriptions",
66 )
67
68 urlpatterns = [
69 path("", views.HomeView.as_view(), name="homepage"),
70 path("healthz", views.healthz, name="health-check"),
71 path("about/", views.simple_page, name="about"),
72 path("help-center/", views.simple_page, name="help-center"),
73 path("help-center/welcome-guide/", views.simple_page, name="welcome-guide"),
74 path("help-center/how-to-transcribe/", views.simple_page, name="how-to-transcribe"),
75 path("help-center/how-to-review/", views.simple_page, name="how-to-review"),
76 path("help-center/how-to-tag/", views.simple_page, name="how-to-tag"),
77 path("for-educators/", views.simple_page, name="for-educators"),
78 path("latest/", views.simple_page, name="latest"),
79 path("questions/", views.simple_page, name="questions"),
80 path("contact/", views.ContactUsView.as_view(), name="contact"),
81 path("campaigns/", include(tx_urlpatterns, namespace="transcriptions")),
82 path(
83 "reserve-asset-for-transcription/<int:asset_pk>/",
84 views.reserve_asset_transcription,
85 name="reserve-asset-for-transcription",
86 ),
87 path(
88 "assets/<int:asset_pk>/transcriptions/save/",
89 views.save_transcription,
90 name="save-transcription",
91 ),
92 path(
93 "transcriptions/<int:pk>/submit/",
94 views.submit_transcription,
95 name="submit-transcription",
96 ),
97 path(
98 "transcriptions/<int:pk>/review/",
99 views.review_transcription,
100 name="review-transcription",
101 ),
102 path("assets/<int:asset_pk>/tags/submit/", views.submit_tags, name="submit-tags"),
103 path("account/ajax-status/", views.ajax_session_status, name="ajax-session-status"),
104 path("account/ajax-messages/", views.ajax_messages, name="ajax-messages"),
105 path(
106 "account/register/",
107 views.ConcordiaRegistrationView.as_view(),
108 name="registration_register",
109 ),
110 path(
111 "account/login/", views.ConcordiaLoginView.as_view(), name="registration_login"
112 ),
113 path("account/profile/", views.AccountProfileView.as_view(), name="user-profile"),
114 path("account/", include("django_registration.backends.activation.urls")),
115 path("account/", include("django.contrib.auth.urls")),
116 path("captcha/ajax/", views.ajax_captcha, name="ajax-captcha"),
117 path("captcha/", include("captcha.urls")),
118 path("admin/", admin.site.urls),
119 # Internal support assists:
120 path("maintenance-mode/", include("maintenance_mode.urls")),
121 path("error/500/", server_error),
122 path("error/404/", page_not_found, {"exception": Http404()}),
123 path("error/429/", views.ratelimit_view),
124 path("error/403/", permission_denied, {"exception": HttpResponseForbidden()}),
125 url("", include("django_prometheus_metrics.urls")),
126 path("robots.txt", include("robots.urls")),
127 ]
128
129 if settings.DEBUG:
130 import debug_toolbar
131 from django.conf.urls.static import static
132
133 urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
134
135 urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
136
[end of concordia/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/concordia/urls.py b/concordia/urls.py
--- a/concordia/urls.py
+++ b/concordia/urls.py
@@ -4,6 +4,7 @@
from django.http import Http404, HttpResponseForbidden
from django.urls import include, path
from django.views.defaults import page_not_found, permission_denied, server_error
+from django.views.generic import RedirectView
from exporter import views as exporter_views
@@ -75,7 +76,10 @@
path("help-center/how-to-review/", views.simple_page, name="how-to-review"),
path("help-center/how-to-tag/", views.simple_page, name="how-to-tag"),
path("for-educators/", views.simple_page, name="for-educators"),
- path("latest/", views.simple_page, name="latest"),
+ path(
+ "latest/",
+ RedirectView.as_view(pattern_name="about", permanent=True, query_string=True),
+ ),
path("questions/", views.simple_page, name="questions"),
path("contact/", views.ContactUsView.as_view(), name="contact"),
path("campaigns/", include(tx_urlpatterns, namespace="transcriptions")),
| {"golden_diff": "diff --git a/concordia/urls.py b/concordia/urls.py\n--- a/concordia/urls.py\n+++ b/concordia/urls.py\n@@ -4,6 +4,7 @@\n from django.http import Http404, HttpResponseForbidden\n from django.urls import include, path\n from django.views.defaults import page_not_found, permission_denied, server_error\n+from django.views.generic import RedirectView\n \n from exporter import views as exporter_views\n \n@@ -75,7 +76,10 @@\n path(\"help-center/how-to-review/\", views.simple_page, name=\"how-to-review\"),\n path(\"help-center/how-to-tag/\", views.simple_page, name=\"how-to-tag\"),\n path(\"for-educators/\", views.simple_page, name=\"for-educators\"),\n- path(\"latest/\", views.simple_page, name=\"latest\"),\n+ path(\n+ \"latest/\",\n+ RedirectView.as_view(pattern_name=\"about\", permanent=True, query_string=True),\n+ ),\n path(\"questions/\", views.simple_page, name=\"questions\"),\n path(\"contact/\", views.ContactUsView.as_view(), name=\"contact\"),\n path(\"campaigns/\", include(tx_urlpatterns, namespace=\"transcriptions\")),\n", "issue": "Remove Latest page URL, in nav bar, and redirect\nCommunity managers want to consolidate `/latest` in to the `/about`. Most of the communication and everyday updates happen in History Hub, there is less usage for a dedicated latest updates page. \r\n\r\nOnce the URL is removed, the CMs will move the content from `/latest` to `/about`. \r\n\r\nAcceptance criteria: \r\n- [x] Remove URL\r\n- [x] Any link to `/latest` to be redirected to `/about` \r\n- [x] Move content (CMs will do this using the static page editor) \n", "before_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.urls import include, path\nfrom django.views.defaults import page_not_found, permission_denied, server_error\n\nfrom exporter import views as exporter_views\n\nfrom . import views\n\ntx_urlpatterns = (\n [\n path(\"\", views.CampaignListView.as_view(), name=\"campaign-list\"),\n path(\n \"<slug:slug>/\", views.CampaignDetailView.as_view(), name=\"campaign-detail\"\n ),\n path(\n \"<slug:campaign_slug>/export/csv/\",\n exporter_views.ExportCampaignToCSV.as_view(),\n name=\"campaign-export-csv\",\n ),\n path(\n \"<slug:campaign_slug>/export/bagit/\",\n exporter_views.ExportCampaignToBagit.as_view(),\n name=\"campaign-export-bagit\",\n ),\n path(\n \"<slug:campaign_slug>/<slug:project_slug>/export/bagit/\",\n exporter_views.ExportProjectToBagIt.as_view(),\n name=\"project-export-bagit\",\n ),\n path(\n \"<slug:campaign_slug>/<slug:project_slug>/<slug:item_id>/export/bagit/\",\n exporter_views.ExportItemToBagIt.as_view(),\n name=\"item-export-bagit\",\n ),\n path(\n \"<slug:campaign_slug>/report/\",\n views.ReportCampaignView.as_view(),\n name=\"campaign-report\",\n ),\n path(\n \"<slug:campaign_slug>/<slug:project_slug>/<slug:item_id>/<slug:slug>/\",\n views.AssetDetailView.as_view(),\n name=\"asset-detail\",\n ),\n # n.b. 
this must be above project-detail to avoid being seen as a project slug:\n path(\n \"<slug:campaign_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_asset,\n name=\"redirect-to-next-transcribable-asset\",\n ),\n path(\n \"<slug:campaign_slug>/<slug:slug>/\",\n views.ProjectDetailView.as_view(),\n name=\"project-detail\",\n ),\n path(\n \"<slug:campaign_slug>/<slug:project_slug>/<slug:item_id>/\",\n views.ItemDetailView.as_view(),\n name=\"item-detail\",\n ),\n ],\n \"transcriptions\",\n)\n\nurlpatterns = [\n path(\"\", views.HomeView.as_view(), name=\"homepage\"),\n path(\"healthz\", views.healthz, name=\"health-check\"),\n path(\"about/\", views.simple_page, name=\"about\"),\n path(\"help-center/\", views.simple_page, name=\"help-center\"),\n path(\"help-center/welcome-guide/\", views.simple_page, name=\"welcome-guide\"),\n path(\"help-center/how-to-transcribe/\", views.simple_page, name=\"how-to-transcribe\"),\n path(\"help-center/how-to-review/\", views.simple_page, name=\"how-to-review\"),\n path(\"help-center/how-to-tag/\", views.simple_page, name=\"how-to-tag\"),\n path(\"for-educators/\", views.simple_page, name=\"for-educators\"),\n path(\"latest/\", views.simple_page, name=\"latest\"),\n path(\"questions/\", views.simple_page, name=\"questions\"),\n path(\"contact/\", views.ContactUsView.as_view(), name=\"contact\"),\n path(\"campaigns/\", include(tx_urlpatterns, namespace=\"transcriptions\")),\n path(\n \"reserve-asset-for-transcription/<int:asset_pk>/\",\n views.reserve_asset_transcription,\n name=\"reserve-asset-for-transcription\",\n ),\n path(\n \"assets/<int:asset_pk>/transcriptions/save/\",\n views.save_transcription,\n name=\"save-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/submit/\",\n views.submit_transcription,\n name=\"submit-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/review/\",\n views.review_transcription,\n name=\"review-transcription\",\n ),\n path(\"assets/<int:asset_pk>/tags/submit/\", views.submit_tags, name=\"submit-tags\"),\n path(\"account/ajax-status/\", views.ajax_session_status, name=\"ajax-session-status\"),\n path(\"account/ajax-messages/\", views.ajax_messages, name=\"ajax-messages\"),\n path(\n \"account/register/\",\n views.ConcordiaRegistrationView.as_view(),\n name=\"registration_register\",\n ),\n path(\n \"account/login/\", views.ConcordiaLoginView.as_view(), name=\"registration_login\"\n ),\n path(\"account/profile/\", views.AccountProfileView.as_view(), name=\"user-profile\"),\n path(\"account/\", include(\"django_registration.backends.activation.urls\")),\n path(\"account/\", include(\"django.contrib.auth.urls\")),\n path(\"captcha/ajax/\", views.ajax_captcha, name=\"ajax-captcha\"),\n path(\"captcha/\", include(\"captcha.urls\")),\n path(\"admin/\", admin.site.urls),\n # Internal support assists:\n path(\"maintenance-mode/\", include(\"maintenance_mode.urls\")),\n path(\"error/500/\", server_error),\n path(\"error/404/\", page_not_found, {\"exception\": Http404()}),\n path(\"error/429/\", views.ratelimit_view),\n path(\"error/403/\", permission_denied, {\"exception\": HttpResponseForbidden()}),\n url(\"\", include(\"django_prometheus_metrics.urls\")),\n path(\"robots.txt\", include(\"robots.urls\")),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n from django.conf.urls.static import static\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "concordia/urls.py"}]} | 
2,113 | 258 |
gh_patches_debug_34612 | rasdani/github-patches | git_diff | Textualize__textual-2305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
It's possible to somehow break the working of `TabbedContent`
This one isn't exactly easy to explain, but hopefully easy to see and recreate with the following code (which is a distilled version of what I'm doing in a bigger application, where I found it).
Worth noting: the motivation here is that (in the application I'm working on) the `TabbedContent` is acting as a sidebar, where each pane has content that can be focused, and I want folk to be able to switch tabs without needing to navigate to the tab bar and back into the pane again. As such there are some bindings in place that call into the `Tabs` and use their prev/next tab actions.
```python
from textual.app import App, ComposeResult
from textual.binding import Binding
from textual.containers import Vertical
from textual.widgets import Header, Footer, TabbedContent, TabPane, Tabs, DirectoryTree
class SelfFocusPane(TabPane):
DEFAULT_CSS = """
SelfFocusPane {
height: 100% !important;
}
DirectoryTree {
width: 100%;
height: 100% !important;
}
"""
def compose( self ) -> ComposeResult:
"""Compose the child widgets."""
yield DirectoryTree(".")
def on_show( self ) -> None:
self.query_one( DirectoryTree ).focus()
class TabbedContentIssueApp( App[ None ] ):
CSS = """
Screen {
align: center middle;
}
Screen > Vertical {
width: 42;
}
TabbedContent {
border: round red;
max-width: 40;
height: 100%;
}
ContentSwitcher {
height: 1fr !important;
}
"""
BINDINGS = [
Binding( "shift+left", "previous", "Previous" ),
Binding( "shift+right", "next", "Next" ),
]
def compose( self ) -> ComposeResult:
yield Header()
with Vertical():
with TabbedContent():
for n in range( 6 ):
yield SelfFocusPane( f"Tab {n}")
yield Footer()
def on_mount(self) -> None:
self.query_one(Tabs).focus()
def action_previous(self) -> None:
self.query_one(Tabs).action_previous_tab()
def action_next(self) -> None:
self.query_one(Tabs).action_next_tab()
if __name__ == "__main__":
TabbedContentIssueApp().run()
```
In experimenting, it looks like the `SelfFocusPane.on_show` setting focus to the child of the pane is key here; remove that and I can't recreate the issue.
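One hedged idea (sketch only, not something I've verified against the Textual codebase) is to stop keeping a second copy of the active-tab state on `TabbedContent` and instead delegate `active` straight to the child `Tabs`, so a focus change during `on_show` can't leave the two copies out of sync:

```python
# Hypothetical sketch: on TabbedContent, drop the separate reactive `active`
# and delegate to the child Tabs widget instead.
@property
def active(self) -> str:
    """The ID of the active tab, or empty string if none are active."""
    return self.get_child_by_type(Tabs).active

@active.setter
def active(self, active: str) -> None:
    if not active:
        raise ValueError("'active' tab must not be empty string.")
    self.get_child_by_type(Tabs).active = active
```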
</issue>
<code>
[start of src/textual/widgets/_tabbed_content.py]
1 from __future__ import annotations
2
3 from itertools import zip_longest
4
5 from rich.repr import Result
6 from rich.text import Text, TextType
7
8 from ..app import ComposeResult
9 from ..message import Message
10 from ..reactive import reactive
11 from ..widget import Widget
12 from ._content_switcher import ContentSwitcher
13 from ._tabs import Tab, Tabs
14
15 __all__ = [
16 "ContentTab",
17 "TabbedContent",
18 "TabPane",
19 ]
20
21
22 class ContentTab(Tab):
23 """A Tab with an associated content id."""
24
25 def __init__(self, label: Text, content_id: str):
26 """Initialize a ContentTab.
27
28 Args:
29 label: The label to be displayed within the tab.
30 content_id: The id of the content associated with the tab.
31 """
32 super().__init__(label, id=content_id)
33
34
35 class TabPane(Widget):
36 """A container for switchable content, with additional title.
37
38 This widget is intended to be used with [TabbedContent][textual.widgets.TabbedContent].
39
40 """
41
42 DEFAULT_CSS = """
43 TabPane {
44 height: auto;
45 padding: 1 2;
46 }
47 """
48
49 def __init__(
50 self,
51 title: TextType,
52 *children: Widget,
53 name: str | None = None,
54 id: str | None = None,
55 classes: str | None = None,
56 disabled: bool = False,
57 ):
58 """Initialize a TabPane.
59
60 Args:
61 title: Title of the TabPane (will be displayed in a tab label).
62 *children: Widget to go inside the TabPane.
63 name: Optional name for the TabPane.
64 id: Optional ID for the TabPane.
65 classes: Optional initial classes for the widget.
66 disabled: Whether the TabPane is disabled or not.
67 """
68 self._title = self.render_str(title)
69 super().__init__(
70 *children, name=name, id=id, classes=classes, disabled=disabled
71 )
72
73
74 class TabbedContent(Widget):
75 """A container with associated tabs to toggle content visibility."""
76
77 DEFAULT_CSS = """
78 TabbedContent {
79 height: auto;
80 }
81 TabbedContent > ContentSwitcher {
82 height: auto;
83 }
84 """
85
86 active: reactive[str] = reactive("", init=False)
87 """The ID of the active tab, or empty string if none are active."""
88
89 class TabActivated(Message):
90 """Posted when the active tab changes."""
91
92 def __init__(self, tabbed_content: TabbedContent, tab: Tab) -> None:
93 """Initialize message.
94
95 Args:
96 tabbed_content: The TabbedContent widget.
97 tab: The Tab widget that was selected (contains the tab label).
98 """
99 self.tabbed_content = tabbed_content
100 self.tab = tab
101 super().__init__()
102
103 def __rich_repr__(self) -> Result:
104 yield self.tabbed_content
105 yield self.tab
106
107 def __init__(self, *titles: TextType, initial: str = "") -> None:
108 """Initialize a TabbedContent widgets.
109
110 Args:
111 *titles: Positional argument will be used as title.
112 initial: The id of the initial tab, or empty string to select the first tab.
113 """
114 self.titles = [self.render_str(title) for title in titles]
115 self._tab_content: list[Widget] = []
116 self._initial = initial
117 super().__init__()
118
119 def validate_active(self, active: str) -> str:
120 """It doesn't make sense for `active` to be an empty string.
121
122 Args:
123 active: Attribute to be validated.
124
125 Returns:
126 Value of `active`.
127
128 Raises:
129 ValueError: If the active attribute is set to empty string.
130 """
131 if not active:
132 raise ValueError("'active' tab must not be empty string.")
133 return active
134
135 def compose(self) -> ComposeResult:
136 """Compose the tabbed content."""
137
138 def set_id(content: TabPane, new_id: str) -> TabPane:
139 """Set an id on the content, if not already present.
140
141 Args:
142 content: a TabPane.
143 new_id: New `is` attribute, if it is not already set.
144
145 Returns:
146 The same TabPane.
147 """
148 if content.id is None:
149 content.id = new_id
150 return content
151
152 # Wrap content in a `TabPane` if required.
153 pane_content = [
154 (
155 set_id(content, f"tab-{index}")
156 if isinstance(content, TabPane)
157 else TabPane(
158 title or self.render_str(f"Tab {index}"), content, id=f"tab-{index}"
159 )
160 )
161 for index, (title, content) in enumerate(
162 zip_longest(self.titles, self._tab_content), 1
163 )
164 ]
165 # Get a tab for each pane
166 tabs = [
167 ContentTab(content._title, content.id or "") for content in pane_content
168 ]
169 # Yield the tabs
170 yield Tabs(*tabs, active=self._initial or None)
171 # Yield the content switcher and panes
172 with ContentSwitcher(initial=self._initial or None):
173 yield from pane_content
174
175 def compose_add_child(self, widget: Widget) -> None:
176 """When using the context manager compose syntax, we want to attach nodes to the switcher.
177
178 Args:
179 widget: A Widget to add.
180 """
181 self._tab_content.append(widget)
182
183 def _on_tabs_tab_activated(self, event: Tabs.TabActivated) -> None:
184 """User clicked a tab."""
185 event.stop()
186 switcher = self.get_child_by_type(ContentSwitcher)
187 assert isinstance(event.tab, ContentTab)
188 switcher.current = event.tab.id
189 self.active = event.tab.id
190 self.post_message(
191 TabbedContent.TabActivated(
192 tabbed_content=self,
193 tab=event.tab,
194 )
195 )
196
197 def _on_tabs_cleared(self, event: Tabs.Cleared) -> None:
198 """All tabs were removed."""
199 event.stop()
200
201 def watch_active(self, active: str) -> None:
202 """Switch tabs when the active attributes changes."""
203 self.get_child_by_type(Tabs).active = active
204
[end of src/textual/widgets/_tabbed_content.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/widgets/_tabbed_content.py b/src/textual/widgets/_tabbed_content.py
--- a/src/textual/widgets/_tabbed_content.py
+++ b/src/textual/widgets/_tabbed_content.py
@@ -83,9 +83,6 @@
}
"""
- active: reactive[str] = reactive("", init=False)
- """The ID of the active tab, or empty string if none are active."""
-
class TabActivated(Message):
"""Posted when the active tab changes."""
@@ -116,21 +113,16 @@
self._initial = initial
super().__init__()
- def validate_active(self, active: str) -> str:
- """It doesn't make sense for `active` to be an empty string.
-
- Args:
- active: Attribute to be validated.
-
- Returns:
- Value of `active`.
+ @property
+ def active(self) -> str:
+ """The ID of the active tab, or empty string if none are active."""
+ return self.get_child_by_type(Tabs).active
- Raises:
- ValueError: If the active attribute is set to empty string.
- """
+ @active.setter
+ def active(self, active: str) -> None:
if not active:
raise ValueError("'active' tab must not be empty string.")
- return active
+ self.get_child_by_type(Tabs).active = active
def compose(self) -> ComposeResult:
"""Compose the tabbed content."""
@@ -186,7 +178,6 @@
switcher = self.get_child_by_type(ContentSwitcher)
assert isinstance(event.tab, ContentTab)
switcher.current = event.tab.id
- self.active = event.tab.id
self.post_message(
TabbedContent.TabActivated(
tabbed_content=self,
@@ -197,7 +188,3 @@
def _on_tabs_cleared(self, event: Tabs.Cleared) -> None:
"""All tabs were removed."""
event.stop()
-
- def watch_active(self, active: str) -> None:
- """Switch tabs when the active attributes changes."""
- self.get_child_by_type(Tabs).active = active
| {"golden_diff": "diff --git a/src/textual/widgets/_tabbed_content.py b/src/textual/widgets/_tabbed_content.py\n--- a/src/textual/widgets/_tabbed_content.py\n+++ b/src/textual/widgets/_tabbed_content.py\n@@ -83,9 +83,6 @@\n }\n \"\"\"\n \n- active: reactive[str] = reactive(\"\", init=False)\n- \"\"\"The ID of the active tab, or empty string if none are active.\"\"\"\n-\n class TabActivated(Message):\n \"\"\"Posted when the active tab changes.\"\"\"\n \n@@ -116,21 +113,16 @@\n self._initial = initial\n super().__init__()\n \n- def validate_active(self, active: str) -> str:\n- \"\"\"It doesn't make sense for `active` to be an empty string.\n-\n- Args:\n- active: Attribute to be validated.\n-\n- Returns:\n- Value of `active`.\n+ @property\n+ def active(self) -> str:\n+ \"\"\"The ID of the active tab, or empty string if none are active.\"\"\"\n+ return self.get_child_by_type(Tabs).active\n \n- Raises:\n- ValueError: If the active attribute is set to empty string.\n- \"\"\"\n+ @active.setter\n+ def active(self, active: str) -> None:\n if not active:\n raise ValueError(\"'active' tab must not be empty string.\")\n- return active\n+ self.get_child_by_type(Tabs).active = active\n \n def compose(self) -> ComposeResult:\n \"\"\"Compose the tabbed content.\"\"\"\n@@ -186,7 +178,6 @@\n switcher = self.get_child_by_type(ContentSwitcher)\n assert isinstance(event.tab, ContentTab)\n switcher.current = event.tab.id\n- self.active = event.tab.id\n self.post_message(\n TabbedContent.TabActivated(\n tabbed_content=self,\n@@ -197,7 +188,3 @@\n def _on_tabs_cleared(self, event: Tabs.Cleared) -> None:\n \"\"\"All tabs were removed.\"\"\"\n event.stop()\n-\n- def watch_active(self, active: str) -> None:\n- \"\"\"Switch tabs when the active attributes changes.\"\"\"\n- self.get_child_by_type(Tabs).active = active\n", "issue": "It's possible to somehow break the working of `TabbedContent`\nThis one isn't exactly easy to explain, but hopefully easy to see and recreate with the following code (which is a distilled version of what I'm doing in a bigger application, where I found it).\r\n\r\nWorth noting: the motivation here is that (in the application I'm working on) the `TabbedContent` is acting as a sidebar, where each pane has content that can be focused, and I want folk to be people to switch tabs without needing to navigate to the tab bar and back into the pane again. 
As such there's some bindings in place that call into the `Tabs` and uses their prev/next tab actions.\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.binding import Binding\r\nfrom textual.containers import Vertical\r\nfrom textual.widgets import Header, Footer, TabbedContent, TabPane, Tabs, DirectoryTree\r\n\r\nclass SelfFocusPane(TabPane):\r\n\r\n DEFAULT_CSS = \"\"\"\r\n SelfFocusPane {\r\n height: 100% !important;\r\n }\r\n DirectoryTree {\r\n width: 100%;\r\n height: 100% !important;\r\n }\r\n \"\"\"\r\n\r\n def compose( self ) -> ComposeResult:\r\n \"\"\"Compose the child widgets.\"\"\"\r\n yield DirectoryTree(\".\")\r\n\r\n def on_show( self ) -> None:\r\n self.query_one( DirectoryTree ).focus()\r\n\r\nclass TabbedContentIssueApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n Screen {\r\n align: center middle;\r\n }\r\n\r\n Screen > Vertical {\r\n width: 42;\r\n }\r\n\r\n TabbedContent {\r\n border: round red;\r\n max-width: 40;\r\n height: 100%;\r\n }\r\n\r\n ContentSwitcher {\r\n height: 1fr !important;\r\n }\r\n \"\"\"\r\n\r\n BINDINGS = [\r\n Binding( \"shift+left\", \"previous\", \"Previous\" ),\r\n Binding( \"shift+right\", \"next\", \"Next\" ),\r\n ]\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n with Vertical():\r\n with TabbedContent():\r\n for n in range( 6 ):\r\n yield SelfFocusPane( f\"Tab {n}\")\r\n yield Footer()\r\n\r\n def on_mount(self) -> None:\r\n self.query_one(Tabs).focus()\r\n\r\n def action_previous(self) -> None:\r\n self.query_one(Tabs).action_previous_tab()\r\n\r\n def action_next(self) -> None:\r\n self.query_one(Tabs).action_next_tab()\r\n\r\nif __name__ == \"__main__\":\r\n TabbedContentIssueApp().run()\r\n```\r\n\r\nIn experimenting, it looks like the `SelfFocusPane.on_show` setting focus to the child of the pane is key here; remove that and I can't recreate the issue.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom itertools import zip_longest\n\nfrom rich.repr import Result\nfrom rich.text import Text, TextType\n\nfrom ..app import ComposeResult\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..widget import Widget\nfrom ._content_switcher import ContentSwitcher\nfrom ._tabs import Tab, Tabs\n\n__all__ = [\n \"ContentTab\",\n \"TabbedContent\",\n \"TabPane\",\n]\n\n\nclass ContentTab(Tab):\n \"\"\"A Tab with an associated content id.\"\"\"\n\n def __init__(self, label: Text, content_id: str):\n \"\"\"Initialize a ContentTab.\n\n Args:\n label: The label to be displayed within the tab.\n content_id: The id of the content associated with the tab.\n \"\"\"\n super().__init__(label, id=content_id)\n\n\nclass TabPane(Widget):\n \"\"\"A container for switchable content, with additional title.\n\n This widget is intended to be used with [TabbedContent][textual.widgets.TabbedContent].\n\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n TabPane {\n height: auto;\n padding: 1 2;\n }\n \"\"\"\n\n def __init__(\n self,\n title: TextType,\n *children: Widget,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Initialize a TabPane.\n\n Args:\n title: Title of the TabPane (will be displayed in a tab label).\n *children: Widget to go inside the TabPane.\n name: Optional name for the TabPane.\n id: Optional ID for the TabPane.\n classes: Optional initial classes for the widget.\n disabled: Whether the TabPane is disabled or not.\n \"\"\"\n self._title = self.render_str(title)\n super().__init__(\n *children, 
name=name, id=id, classes=classes, disabled=disabled\n )\n\n\nclass TabbedContent(Widget):\n \"\"\"A container with associated tabs to toggle content visibility.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n TabbedContent {\n height: auto;\n }\n TabbedContent > ContentSwitcher {\n height: auto;\n }\n \"\"\"\n\n active: reactive[str] = reactive(\"\", init=False)\n \"\"\"The ID of the active tab, or empty string if none are active.\"\"\"\n\n class TabActivated(Message):\n \"\"\"Posted when the active tab changes.\"\"\"\n\n def __init__(self, tabbed_content: TabbedContent, tab: Tab) -> None:\n \"\"\"Initialize message.\n\n Args:\n tabbed_content: The TabbedContent widget.\n tab: The Tab widget that was selected (contains the tab label).\n \"\"\"\n self.tabbed_content = tabbed_content\n self.tab = tab\n super().__init__()\n\n def __rich_repr__(self) -> Result:\n yield self.tabbed_content\n yield self.tab\n\n def __init__(self, *titles: TextType, initial: str = \"\") -> None:\n \"\"\"Initialize a TabbedContent widgets.\n\n Args:\n *titles: Positional argument will be used as title.\n initial: The id of the initial tab, or empty string to select the first tab.\n \"\"\"\n self.titles = [self.render_str(title) for title in titles]\n self._tab_content: list[Widget] = []\n self._initial = initial\n super().__init__()\n\n def validate_active(self, active: str) -> str:\n \"\"\"It doesn't make sense for `active` to be an empty string.\n\n Args:\n active: Attribute to be validated.\n\n Returns:\n Value of `active`.\n\n Raises:\n ValueError: If the active attribute is set to empty string.\n \"\"\"\n if not active:\n raise ValueError(\"'active' tab must not be empty string.\")\n return active\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose the tabbed content.\"\"\"\n\n def set_id(content: TabPane, new_id: str) -> TabPane:\n \"\"\"Set an id on the content, if not already present.\n\n Args:\n content: a TabPane.\n new_id: New `is` attribute, if it is not already set.\n\n Returns:\n The same TabPane.\n \"\"\"\n if content.id is None:\n content.id = new_id\n return content\n\n # Wrap content in a `TabPane` if required.\n pane_content = [\n (\n set_id(content, f\"tab-{index}\")\n if isinstance(content, TabPane)\n else TabPane(\n title or self.render_str(f\"Tab {index}\"), content, id=f\"tab-{index}\"\n )\n )\n for index, (title, content) in enumerate(\n zip_longest(self.titles, self._tab_content), 1\n )\n ]\n # Get a tab for each pane\n tabs = [\n ContentTab(content._title, content.id or \"\") for content in pane_content\n ]\n # Yield the tabs\n yield Tabs(*tabs, active=self._initial or None)\n # Yield the content switcher and panes\n with ContentSwitcher(initial=self._initial or None):\n yield from pane_content\n\n def compose_add_child(self, widget: Widget) -> None:\n \"\"\"When using the context manager compose syntax, we want to attach nodes to the switcher.\n\n Args:\n widget: A Widget to add.\n \"\"\"\n self._tab_content.append(widget)\n\n def _on_tabs_tab_activated(self, event: Tabs.TabActivated) -> None:\n \"\"\"User clicked a tab.\"\"\"\n event.stop()\n switcher = self.get_child_by_type(ContentSwitcher)\n assert isinstance(event.tab, ContentTab)\n switcher.current = event.tab.id\n self.active = event.tab.id\n self.post_message(\n TabbedContent.TabActivated(\n tabbed_content=self,\n tab=event.tab,\n )\n )\n\n def _on_tabs_cleared(self, event: Tabs.Cleared) -> None:\n \"\"\"All tabs were removed.\"\"\"\n event.stop()\n\n def watch_active(self, active: str) -> None:\n \"\"\"Switch tabs when the active 
attributes changes.\"\"\"\n self.get_child_by_type(Tabs).active = active\n", "path": "src/textual/widgets/_tabbed_content.py"}]} | 2,997 | 495 |
gh_patches_debug_27308 | rasdani/github-patches | git_diff | pytorch__text-208 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
text/test/translation.py fails for custom paths
`text/test/translation.py` currently fails on the last section:
```python
train, val = datasets.TranslationDataset.splits(
path='.data/multi30k/', train='train',
validation='val', exts=('.de', '.en'),
fields=(DE, EN))
```
because `splits` expects TranslationDataset.name to be defined, but it isn't.
Possible fix: add `name = ''` to `TranslationDataset`
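For illustration, that stopgap might look like the sketch below (untested guess; `download()` may also expect `urls` and `dirname` class attributes, so treat the exact attribute set as an assumption rather than a verified fix — an alternative would be letting `splits()` accept an explicit `path` and skip `download()` entirely):

```python
# Existing class in torchtext/datasets/translation.py, with hypothetical
# class attributes added so cls.download(root) inside splits() has what it
# appears to rely on.
class TranslationDataset(data.Dataset):
    """Defines a dataset for machine translation."""

    name = ''
    dirname = ''
    urls = []
```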
</issue>
<code>
[start of torchtext/datasets/translation.py]
1 import os
2 import xml.etree.ElementTree as ET
3 import glob
4 import io
5
6 from .. import data
7
8
9 class TranslationDataset(data.Dataset):
10 """Defines a dataset for machine translation."""
11
12 @staticmethod
13 def sort_key(ex):
14 return data.interleave_keys(len(ex.src), len(ex.trg))
15
16 def __init__(self, path, exts, fields, **kwargs):
17 """Create a TranslationDataset given paths and fields.
18
19 Arguments:
20 path: Common prefix of paths to the data files for both languages.
21 exts: A tuple containing the extension to path for each language.
22 fields: A tuple containing the fields that will be used for data
23 in each language.
24 Remaining keyword arguments: Passed to the constructor of
25 data.Dataset.
26 """
27 if not isinstance(fields[0], (tuple, list)):
28 fields = [('src', fields[0]), ('trg', fields[1])]
29
30 src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)
31
32 examples = []
33 with open(src_path) as src_file, open(trg_path) as trg_file:
34 for src_line, trg_line in zip(src_file, trg_file):
35 src_line, trg_line = src_line.strip(), trg_line.strip()
36 if src_line != '' and trg_line != '':
37 examples.append(data.Example.fromlist(
38 [src_line, trg_line], fields))
39
40 super(TranslationDataset, self).__init__(examples, fields, **kwargs)
41
42 @classmethod
43 def splits(cls, exts, fields, root='.data',
44 train='train', validation='val', test='test', **kwargs):
45 """Create dataset objects for splits of a TranslationDataset.
46
47 Arguments:
48
49 root: Root dataset storage directory. Default is '.data'.
50 exts: A tuple containing the extension to path for each language.
51 fields: A tuple containing the fields that will be used for data
52 in each language.
53 train: The prefix of the train data. Default: 'train'.
54 validation: The prefix of the validation data. Default: 'val'.
55 test: The prefix of the test data. Default: 'test'.
56 Remaining keyword arguments: Passed to the splits method of
57 Dataset.
58 """
59 path = cls.download(root)
60
61 train_data = None if train is None else cls(
62 os.path.join(path, train), exts, fields, **kwargs)
63 val_data = None if validation is None else cls(
64 os.path.join(path, validation), exts, fields, **kwargs)
65 test_data = None if test is None else cls(
66 os.path.join(path, test), exts, fields, **kwargs)
67 return tuple(d for d in (train_data, val_data, test_data)
68 if d is not None)
69
70
71 class Multi30k(TranslationDataset):
72 """The small-dataset WMT 2016 multimodal task, also known as Flickr30k"""
73
74 urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',
75 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',
76 'http://www.quest.dcs.shef.ac.uk/'
77 'wmt17_files_mmt/mmt_task1_test2016.tar.gz']
78 name = 'multi30k'
79 dirname = ''
80
81 @classmethod
82 def splits(cls, exts, fields, root='.data',
83 train='train', validation='val', test='test2016', **kwargs):
84 """Create dataset objects for splits of the Multi30k dataset.
85
86 Arguments:
87
88 root: Root dataset storage directory. Default is '.data'.
89 exts: A tuple containing the extension to path for each language.
90 fields: A tuple containing the fields that will be used for data
91 in each language.
92 train: The prefix of the train data. Default: 'train'.
93 validation: The prefix of the validation data. Default: 'val'.
94 test: The prefix of the test data. Default: 'test'.
95 Remaining keyword arguments: Passed to the splits method of
96 Dataset.
97 """
98 return super(Multi30k, cls).splits(
99 exts, fields, root, train, validation, test, **kwargs)
100
101
102 class IWSLT(TranslationDataset):
103 """The IWSLT 2016 TED talk translation task"""
104
105 base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'
106 name = 'iwslt'
107 base_dirname = '{}-{}'
108
109 @classmethod
110 def splits(cls, exts, fields, root='.data',
111 train='train', validation='IWSLT16.TED.tst2013',
112 test='IWSLT16.TED.tst2014', **kwargs):
113 """Create dataset objects for splits of the IWSLT dataset.
114
115 Arguments:
116
117 root: Root dataset storage directory. Default is '.data'.
118 exts: A tuple containing the extension to path for each language.
119 fields: A tuple containing the fields that will be used for data
120 in each language.
121 train: The prefix of the train data. Default: 'train'.
122 validation: The prefix of the validation data. Default: 'val'.
123 test: The prefix of the test data. Default: 'test'.
124 Remaining keyword arguments: Passed to the splits method of
125 Dataset.
126 """
127 cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])
128 cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]
129 check = os.path.join(root, cls.name, cls.dirname)
130 path = cls.download(root, check=check)
131
132 train = '.'.join([train, cls.dirname])
133 validation = '.'.join([validation, cls.dirname])
134 if test is not None:
135 test = '.'.join([test, cls.dirname])
136
137 if not os.path.exists(os.path.join(path, train) + exts[0]):
138 cls.clean(path)
139
140 train_data = None if train is None else cls(
141 os.path.join(path, train), exts, fields, **kwargs)
142 val_data = None if validation is None else cls(
143 os.path.join(path, validation), exts, fields, **kwargs)
144 test_data = None if test is None else cls(
145 os.path.join(path, test), exts, fields, **kwargs)
146 return tuple(d for d in (train_data, val_data, test_data)
147 if d is not None)
148
149 @staticmethod
150 def clean(path):
151 for f_xml in glob.iglob(os.path.join(path, '*.xml')):
152 print(f_xml)
153 f_txt = os.path.splitext(f_xml)[0]
154 with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt:
155 root = ET.parse(f_xml).getroot()[0]
156 for doc in root.findall('doc'):
157 for e in doc.findall('seg'):
158 fd_txt.write(e.text.strip() + '\n')
159
160 xml_tags = ['<url', '<keywords', '<talkid', '<description',
161 '<reviewer', '<translator', '<title', '<speaker']
162 for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):
163 print(f_orig)
164 f_txt = f_orig.replace('.tags', '')
165 with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \
166 io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:
167 for l in fd_orig:
168 if not any(tag in l for tag in xml_tags):
169 fd_txt.write(l.strip() + '\n')
170
171
172 class WMT14(TranslationDataset):
173 """The WMT 2014 English-German dataset, as preprocessed by Google Brain.
174
175 Though this download contains test sets from 2015 and 2016, the train set
176 differs slightly from WMT 2015 and 2016 and significantly from WMT 2017."""
177
178 urls = [('https://drive.google.com/uc?export=download&'
179 'id=0B_bZck-ksdkpM25jRUN2X2UxMm8', 'wmt16_en_de.tar.gz')]
180 name = 'wmt14'
181 dirname = ''
182
183 @classmethod
184 def splits(cls, exts, fields, root='.data',
185 train='train.tok.clean.bpe.32000',
186 validation='newstest2013.tok.bpe.32000',
187 test='newstest2014.tok.bpe.32000', **kwargs):
188 """Create dataset objects for splits of the WMT 2014 dataset.
189
190 Arguments:
191
192 root: Root dataset storage directory. Default is '.data'.
193 exts: A tuple containing the extensions for each language. Must be
194 either ('.en', '.de') or the reverse.
195 fields: A tuple containing the fields that will be used for data
196 in each language.
197 train: The prefix of the train data. Default:
198 'train.tok.clean.bpe.32000'.
199 validation: The prefix of the validation data. Default:
200 'newstest2013.tok.bpe.32000'.
201 test: The prefix of the test data. Default:
202 'newstest2014.tok.bpe.32000'.
203 Remaining keyword arguments: Passed to the splits method of
204 Dataset.
205 """
206 return super(WMT14, cls).splits(
207 exts, fields, root, train, validation, test, **kwargs)
208
[end of torchtext/datasets/translation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchtext/datasets/translation.py b/torchtext/datasets/translation.py
--- a/torchtext/datasets/translation.py
+++ b/torchtext/datasets/translation.py
@@ -40,12 +40,13 @@
super(TranslationDataset, self).__init__(examples, fields, **kwargs)
@classmethod
- def splits(cls, exts, fields, root='.data',
+ def splits(cls, exts, fields, path=None, root='.data',
train='train', validation='val', test='test', **kwargs):
"""Create dataset objects for splits of a TranslationDataset.
Arguments:
-
+ path (str): Common prefix of the splits' file paths, or None to use
+ the result of cls.download(root).
root: Root dataset storage directory. Default is '.data'.
exts: A tuple containing the extension to path for each language.
fields: A tuple containing the fields that will be used for data
@@ -56,7 +57,8 @@
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
- path = cls.download(root)
+ if path is None:
+ path = cls.download(root)
train_data = None if train is None else cls(
os.path.join(path, train), exts, fields, **kwargs)
| {"golden_diff": "diff --git a/torchtext/datasets/translation.py b/torchtext/datasets/translation.py\n--- a/torchtext/datasets/translation.py\n+++ b/torchtext/datasets/translation.py\n@@ -40,12 +40,13 @@\n super(TranslationDataset, self).__init__(examples, fields, **kwargs)\n \n @classmethod\n- def splits(cls, exts, fields, root='.data',\n+ def splits(cls, exts, fields, path=None, root='.data',\n train='train', validation='val', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of a TranslationDataset.\n \n Arguments:\n-\n+ path (str): Common prefix of the splits' file paths, or None to use\n+ the result of cls.download(root).\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n@@ -56,7 +57,8 @@\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n- path = cls.download(root)\n+ if path is None:\n+ path = cls.download(root)\n \n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n", "issue": "text/test/translation.py fails for custom paths\n`text/test/translation.py` currently fails on the last section:\r\n\r\n```python\r\ntrain, val = datasets.TranslationDataset.splits(\r\n path='.data/multi30k/', train='train',\r\n validation='val', exts=('.de', '.en'),\r\n fields=(DE, EN))\r\n```\r\n\r\nbecause `splits` expects TranslationDataset.name to be defined, but it isn't.\r\nPossible fix: add `name = ''` to `TranslationDataset`\n", "before_files": [{"content": "import os\nimport xml.etree.ElementTree as ET\nimport glob\nimport io\n\nfrom .. import data\n\n\nclass TranslationDataset(data.Dataset):\n \"\"\"Defines a dataset for machine translation.\"\"\"\n\n @staticmethod\n def sort_key(ex):\n return data.interleave_keys(len(ex.src), len(ex.trg))\n\n def __init__(self, path, exts, fields, **kwargs):\n \"\"\"Create a TranslationDataset given paths and fields.\n\n Arguments:\n path: Common prefix of paths to the data files for both languages.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.\n \"\"\"\n if not isinstance(fields[0], (tuple, list)):\n fields = [('src', fields[0]), ('trg', fields[1])]\n\n src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)\n\n examples = []\n with open(src_path) as src_file, open(trg_path) as trg_file:\n for src_line, trg_line in zip(src_file, trg_file):\n src_line, trg_line = src_line.strip(), trg_line.strip()\n if src_line != '' and trg_line != '':\n examples.append(data.Example.fromlist(\n [src_line, trg_line], fields))\n\n super(TranslationDataset, self).__init__(examples, fields, **kwargs)\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='val', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of a TranslationDataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. 
Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n path = cls.download(root)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n\nclass Multi30k(TranslationDataset):\n \"\"\"The small-dataset WMT 2016 multimodal task, also known as Flickr30k\"\"\"\n\n urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/'\n 'wmt17_files_mmt/mmt_task1_test2016.tar.gz']\n name = 'multi30k'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='val', test='test2016', **kwargs):\n \"\"\"Create dataset objects for splits of the Multi30k dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(Multi30k, cls).splits(\n exts, fields, root, train, validation, test, **kwargs)\n\n\nclass IWSLT(TranslationDataset):\n \"\"\"The IWSLT 2016 TED talk translation task\"\"\"\n\n base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'\n name = 'iwslt'\n base_dirname = '{}-{}'\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='IWSLT16.TED.tst2013',\n test='IWSLT16.TED.tst2014', **kwargs):\n \"\"\"Create dataset objects for splits of the IWSLT dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. 
Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])\n cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]\n check = os.path.join(root, cls.name, cls.dirname)\n path = cls.download(root, check=check)\n\n train = '.'.join([train, cls.dirname])\n validation = '.'.join([validation, cls.dirname])\n if test is not None:\n test = '.'.join([test, cls.dirname])\n\n if not os.path.exists(os.path.join(path, train) + exts[0]):\n cls.clean(path)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n @staticmethod\n def clean(path):\n for f_xml in glob.iglob(os.path.join(path, '*.xml')):\n print(f_xml)\n f_txt = os.path.splitext(f_xml)[0]\n with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt:\n root = ET.parse(f_xml).getroot()[0]\n for doc in root.findall('doc'):\n for e in doc.findall('seg'):\n fd_txt.write(e.text.strip() + '\\n')\n\n xml_tags = ['<url', '<keywords', '<talkid', '<description',\n '<reviewer', '<translator', '<title', '<speaker']\n for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):\n print(f_orig)\n f_txt = f_orig.replace('.tags', '')\n with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \\\n io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:\n for l in fd_orig:\n if not any(tag in l for tag in xml_tags):\n fd_txt.write(l.strip() + '\\n')\n\n\nclass WMT14(TranslationDataset):\n \"\"\"The WMT 2014 English-German dataset, as preprocessed by Google Brain.\n\n Though this download contains test sets from 2015 and 2016, the train set\n differs slightly from WMT 2015 and 2016 and significantly from WMT 2017.\"\"\"\n\n urls = [('https://drive.google.com/uc?export=download&'\n 'id=0B_bZck-ksdkpM25jRUN2X2UxMm8', 'wmt16_en_de.tar.gz')]\n name = 'wmt14'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train.tok.clean.bpe.32000',\n validation='newstest2013.tok.bpe.32000',\n test='newstest2014.tok.bpe.32000', **kwargs):\n \"\"\"Create dataset objects for splits of the WMT 2014 dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extensions for each language. Must be\n either ('.en', '.de') or the reverse.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default:\n 'train.tok.clean.bpe.32000'.\n validation: The prefix of the validation data. Default:\n 'newstest2013.tok.bpe.32000'.\n test: The prefix of the test data. Default:\n 'newstest2014.tok.bpe.32000'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(WMT14, cls).splits(\n exts, fields, root, train, validation, test, **kwargs)\n", "path": "torchtext/datasets/translation.py"}]} | 3,319 | 299 |
gh_patches_debug_35444 | rasdani/github-patches | git_diff | InstaPy__InstaPy-831 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot focus element error
I'm running the script on Mac and after some time I get this error:
Message: unknown error: cannot focus element
(Session info: chrome=61.0.3163.100)
(Driver info: chromedriver=2.32.498537 (cb2f855cbc7b82e20387eaf9a43f6b99b6105061),platform=Mac OS X 10.12.3 x86_64)
Now I'm trying to update chromedriver with all of the packages to check whether they are the reason, but does anybody else get this error?
</issue>
<code>
[start of instapy/comment_util.py]
1 # -*- coding: utf-8 -*-
2 """Module which handles the commenting features"""
3 from random import choice
4 from .time_util import sleep
5 import emoji
6
7
8 def comment_image(browser, comments):
9 """Checks if it should comment on the image"""
10 rand_comment = (choice(comments))
11 rand_comment = emoji.demojize(rand_comment)
12 rand_comment = emoji.emojize(rand_comment, use_aliases=True)
13
14 comment_input = browser.find_elements_by_xpath(
15 '//textarea[@placeholder = "Add a comment…"]')
16 if len(comment_input) <= 0:
17 comment_input = browser.find_elements_by_xpath(
18 '//input[@placeholder = "Add a comment…"]')
19
20 if len(comment_input) > 0:
21 browser.execute_script(
22 "arguments[0].value = '" + rand_comment + " ';", comment_input[0])
23 # An extra space is added here and then deleted.
24 # This forces the input box to update the reactJS core
25 comment_input[0].send_keys("\b")
26 comment_input[0].submit()
27 else:
28 print('--> Warning: Comment Action Likely Failed:'
29 ' Comment Element not found')
30
31 print("--> Commented: {}".format(rand_comment.encode('utf-8')))
32 sleep(2)
33
34 return 1
35
[end of instapy/comment_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/instapy/comment_util.py b/instapy/comment_util.py
--- a/instapy/comment_util.py
+++ b/instapy/comment_util.py
@@ -2,8 +2,31 @@
"""Module which handles the commenting features"""
from random import choice
from .time_util import sleep
+from selenium.common.exceptions import WebDriverException
import emoji
+def get_comment_input(browser):
+ comment_input = browser.find_elements_by_xpath(
+ '//textarea[@placeholder = "Add a comment…"]')
+ if len(comment_input) <= 0:
+ comment_input = browser.find_elements_by_xpath(
+ '//input[@placeholder = "Add a comment…"]')
+ return comment_input
+
+def open_comment_section(browser):
+ missing_comment_elem_warning = (
+ '--> Warning: Comment Button Not Found:'
+ ' May cause issues with browser windows of smaller widths')
+ comment_elem = browser.find_elements_by_xpath(
+ "//a[@role='button']/span[text()='Comment']/..")
+ if len(comment_elem) > 0:
+ try:
+ browser.execute_script(
+ "arguments[0].click();", comment_elem[0])
+ except WebDriverException:
+ print(missing_comment_elem_warning)
+ else:
+ print(missing_comment_elem_warning)
def comment_image(browser, comments):
"""Checks if it should comment on the image"""
@@ -11,18 +34,19 @@
rand_comment = emoji.demojize(rand_comment)
rand_comment = emoji.emojize(rand_comment, use_aliases=True)
- comment_input = browser.find_elements_by_xpath(
- '//textarea[@placeholder = "Add a comment…"]')
- if len(comment_input) <= 0:
- comment_input = browser.find_elements_by_xpath(
- '//input[@placeholder = "Add a comment…"]')
+ open_comment_section(browser)
+ comment_input = get_comment_input(browser)
if len(comment_input) > 0:
+ comment_input[0].clear()
+ comment_input = get_comment_input(browser)
+
browser.execute_script(
"arguments[0].value = '" + rand_comment + " ';", comment_input[0])
# An extra space is added here and then deleted.
# This forces the input box to update the reactJS core
comment_input[0].send_keys("\b")
+ comment_input = get_comment_input(browser)
comment_input[0].submit()
else:
print('--> Warning: Comment Action Likely Failed:'
| {"golden_diff": "diff --git a/instapy/comment_util.py b/instapy/comment_util.py\n--- a/instapy/comment_util.py\n+++ b/instapy/comment_util.py\n@@ -2,8 +2,31 @@\n \"\"\"Module which handles the commenting features\"\"\"\n from random import choice\n from .time_util import sleep\n+from selenium.common.exceptions import WebDriverException\n import emoji\n \n+def get_comment_input(browser):\n+ comment_input = browser.find_elements_by_xpath(\n+ '//textarea[@placeholder = \"Add a comment\u2026\"]')\n+ if len(comment_input) <= 0:\n+ comment_input = browser.find_elements_by_xpath(\n+ '//input[@placeholder = \"Add a comment\u2026\"]')\n+ return comment_input\n+\n+def open_comment_section(browser):\n+ missing_comment_elem_warning = (\n+ '--> Warning: Comment Button Not Found:'\n+ ' May cause issues with browser windows of smaller widths')\n+ comment_elem = browser.find_elements_by_xpath(\n+ \"//a[@role='button']/span[text()='Comment']/..\")\n+ if len(comment_elem) > 0:\n+ try:\n+ browser.execute_script(\n+ \"arguments[0].click();\", comment_elem[0])\n+ except WebDriverException:\n+ print(missing_comment_elem_warning)\n+ else:\n+ print(missing_comment_elem_warning)\n \n def comment_image(browser, comments):\n \"\"\"Checks if it should comment on the image\"\"\"\n@@ -11,18 +34,19 @@\n rand_comment = emoji.demojize(rand_comment)\n rand_comment = emoji.emojize(rand_comment, use_aliases=True)\n \n- comment_input = browser.find_elements_by_xpath(\n- '//textarea[@placeholder = \"Add a comment\u2026\"]')\n- if len(comment_input) <= 0:\n- comment_input = browser.find_elements_by_xpath(\n- '//input[@placeholder = \"Add a comment\u2026\"]')\n+ open_comment_section(browser)\n+ comment_input = get_comment_input(browser)\n \n if len(comment_input) > 0:\n+ comment_input[0].clear()\n+ comment_input = get_comment_input(browser)\n+\n browser.execute_script(\n \"arguments[0].value = '\" + rand_comment + \" ';\", comment_input[0])\n # An extra space is added here and then deleted.\n # This forces the input box to update the reactJS core\n comment_input[0].send_keys(\"\\b\")\n+ comment_input = get_comment_input(browser)\n comment_input[0].submit()\n else:\n print('--> Warning: Comment Action Likely Failed:'\n", "issue": "Cannot focus element error\nI'm running the script on mac and after some time i get this error:\r\n\r\nMessage: unknown error: cannot focus element\r\n (Session info: chrome=61.0.3163.100)\r\n (Driver info: chromedriver=2.32.498537 (cb2f855cbc7b82e20387eaf9a43f6b99b6105061),platform=Mac OS X 10.12.3 x86_64)\r\n\r\nNow I'm trying to update chromedriver with all of the packages to check whether they are the reason, but does anybody else get this error?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Module which handles the commenting features\"\"\"\nfrom random import choice\nfrom .time_util import sleep\nimport emoji\n\n\ndef comment_image(browser, comments):\n \"\"\"Checks if it should comment on the image\"\"\"\n rand_comment = (choice(comments))\n rand_comment = emoji.demojize(rand_comment)\n rand_comment = emoji.emojize(rand_comment, use_aliases=True)\n\n comment_input = browser.find_elements_by_xpath(\n '//textarea[@placeholder = \"Add a comment\u2026\"]')\n if len(comment_input) <= 0:\n comment_input = browser.find_elements_by_xpath(\n '//input[@placeholder = \"Add a comment\u2026\"]')\n\n if len(comment_input) > 0:\n browser.execute_script(\n \"arguments[0].value = '\" + rand_comment + \" ';\", comment_input[0])\n # An extra space is added here and then deleted.\n # This forces 
the input box to update the reactJS core\n comment_input[0].send_keys(\"\\b\")\n comment_input[0].submit()\n else:\n print('--> Warning: Comment Action Likely Failed:'\n ' Comment Element not found')\n\n print(\"--> Commented: {}\".format(rand_comment.encode('utf-8')))\n sleep(2)\n\n return 1\n", "path": "instapy/comment_util.py"}]} | 1,029 | 549 |
gh_patches_debug_14352 | rasdani/github-patches | git_diff | gratipay__gratipay.com-2429 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
preclude adding stub participants as team members
Reticketed from #2362.
preclude adding stub participants as team members
Reticketed from #2362.
</issue>
<code>
[start of gittip/models/_mixin_team.py]
1 """Teams on Gittip are plural participants with members.
2 """
3 from decimal import Decimal
4
5 from aspen.utils import typecheck
6
7
8 class MemberLimitReached(Exception): pass
9
10
11 class MixinTeam(object):
12 """This class provides methods for working with a Participant as a Team.
13
14 :param Participant participant: the underlying :py:class:`~gittip.participant.Participant` object for this team
15
16 """
17
18 # XXX These were all written with the ORM and need to be converted.
19
20 def __init__(self, participant):
21 self.participant = participant
22
23 def show_as_team(self, user):
24 """Return a boolean, whether to show this participant as a team.
25 """
26 if not self.IS_PLURAL:
27 return False
28 if user.ADMIN:
29 return True
30 if not self.get_takes():
31 if self == user.participant:
32 return True
33 return False
34 return True
35
36 def add_member(self, member):
37 """Add a member to this team.
38 """
39 assert self.IS_PLURAL
40 if len(self.get_takes()) == 149:
41 raise MemberLimitReached
42 self.__set_take_for(member, Decimal('0.01'), self)
43
44 def remove_member(self, member):
45 """Remove a member from this team.
46 """
47 assert self.IS_PLURAL
48 self.__set_take_for(member, Decimal('0.00'), self)
49
50 def member_of(self, team):
51 """Given a Participant object, return a boolean.
52 """
53 assert team.IS_PLURAL
54 for take in team.get_takes():
55 if take['member'] == self.username:
56 return True
57 return False
58
59 def get_take_last_week_for(self, member):
60 """What did the user actually take most recently? Used in throttling.
61 """
62 assert self.IS_PLURAL
63 membername = member.username if hasattr(member, 'username') \
64 else member['username']
65 return self.db.one("""
66
67 SELECT amount
68 FROM transfers
69 WHERE tipper=%s AND tippee=%s
70 AND timestamp >
71 (SELECT ts_start FROM paydays ORDER BY ts_start DESC LIMIT 1)
72 ORDER BY timestamp DESC LIMIT 1
73
74 """, (self.username, membername), default=Decimal('0.00'))
75
76 def get_take_for(self, member):
77 """Return a Decimal representation of the take for this member, or 0.
78 """
79 assert self.IS_PLURAL
80 return self.db.one( "SELECT amount FROM current_takes "
81 "WHERE member=%s AND team=%s"
82 , (member.username, self.username)
83 , default=Decimal('0.00')
84 )
85
86 def compute_max_this_week(self, last_week):
87 """2x last week's take, but at least a dollar.
88 """
89 return max(last_week * Decimal('2'), Decimal('1.00'))
90
91 def set_take_for(self, member, take, recorder):
92 """Sets member's take from the team pool.
93 """
94 assert self.IS_PLURAL
95
96 # lazy import to avoid circular import
97 from gittip.security.user import User
98 from gittip.models.participant import Participant
99
100 typecheck( member, Participant
101 , take, Decimal
102 , recorder, (Participant, User)
103 )
104
105 last_week = self.get_take_last_week_for(member)
106 max_this_week = self.compute_max_this_week(last_week)
107 if take > max_this_week:
108 take = max_this_week
109
110 self.__set_take_for(member, take, recorder)
111 return take
112
113 def __set_take_for(self, member, amount, recorder):
114 assert self.IS_PLURAL
115 # XXX Factored out for testing purposes only! :O Use .set_take_for.
116 self.db.run("""
117
118 INSERT INTO takes (ctime, member, team, amount, recorder)
119 VALUES ( COALESCE (( SELECT ctime
120 FROM takes
121 WHERE member=%s
122 AND team=%s
123 LIMIT 1
124 ), CURRENT_TIMESTAMP)
125 , %s
126 , %s
127 , %s
128 , %s
129 )
130
131 """, (member.username, self.username, member.username, self.username, \
132 amount, recorder.username))
133
134 def get_takes(self, for_payday=False):
135 """Return a list of member takes for a team.
136
137 This is implemented parallel to Participant.get_tips_and_total. See
138 over there for an explanation of for_payday.
139
140 """
141 assert self.IS_PLURAL
142
143 args = dict(team=self.username)
144
145 if for_payday:
146 args['ts_start'] = for_payday
147
148 # Get the takes for this team, as they were before ts_start,
149 # filtering out the ones we've already transferred (in case payday
150 # is interrupted and restarted).
151
152 TAKES = """\
153
154 SELECT * FROM (
155 SELECT DISTINCT ON (member) t.*
156 FROM takes t
157 JOIN participants p ON p.username = member
158 WHERE team=%(team)s
159 AND mtime < %(ts_start)s
160 AND p.is_suspicious IS NOT true
161 AND ( SELECT id
162 FROM transfers
163 WHERE tipper=t.team
164 AND tippee=t.member
165 AND as_team_member IS true
166 AND timestamp >= %(ts_start)s
167 ) IS NULL
168 ORDER BY member, mtime DESC
169 ) AS foo
170 ORDER BY ctime DESC
171
172 """
173 else:
174 TAKES = """\
175
176 SELECT member, amount, ctime, mtime
177 FROM current_takes
178 WHERE team=%(team)s
179 ORDER BY ctime DESC
180
181 """
182
183 return self.db.all(TAKES, args, back_as=dict)
184
185 def get_team_take(self):
186 """Return a single take for a team, the team itself's take.
187 """
188 assert self.IS_PLURAL
189 TAKE = "SELECT sum(amount) FROM current_takes WHERE team=%s"
190 total_take = self.db.one(TAKE, (self.username,), default=0)
191 team_take = max(self.get_dollars_receiving() - total_take, 0)
192 membership = { "ctime": None
193 , "mtime": None
194 , "member": self.username
195 , "amount": team_take
196 }
197 return membership
198
199 def get_members(self, current_participant):
200 """Return a list of member dicts.
201 """
202 assert self.IS_PLURAL
203 takes = self.get_takes()
204 takes.append(self.get_team_take())
205 budget = balance = self.get_dollars_receiving()
206 members = []
207 for take in takes:
208 member = {}
209 member['username'] = take['member']
210 member['take'] = take['amount']
211
212 member['removal_allowed'] = current_participant == self
213 member['editing_allowed'] = False
214 member['is_current_user'] = False
215 if current_participant is not None:
216 if member['username'] == current_participant.username:
217 member['is_current_user'] = True
218 if take['ctime'] is not None:
219 # current user, but not the team itself
220 member['editing_allowed']= True
221
222 member['last_week'] = last_week = self.get_take_last_week_for(member)
223 member['max_this_week'] = self.compute_max_this_week(last_week)
224 amount = min(take['amount'], balance)
225 balance -= amount
226 member['balance'] = balance
227 member['percentage'] = (amount / budget) if budget > 0 else 0
228 members.append(member)
229 return members
230
[end of gittip/models/_mixin_team.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gittip/models/_mixin_team.py b/gittip/models/_mixin_team.py
--- a/gittip/models/_mixin_team.py
+++ b/gittip/models/_mixin_team.py
@@ -7,6 +7,7 @@
class MemberLimitReached(Exception): pass
+class StubParticipantAdded(Exception): pass
class MixinTeam(object):
"""This class provides methods for working with a Participant as a Team.
@@ -39,6 +40,8 @@
assert self.IS_PLURAL
if len(self.get_takes()) == 149:
raise MemberLimitReached
+ if not member.is_claimed:
+ raise StubParticipantAdded
self.__set_take_for(member, Decimal('0.01'), self)
def remove_member(self, member):
| {"golden_diff": "diff --git a/gittip/models/_mixin_team.py b/gittip/models/_mixin_team.py\n--- a/gittip/models/_mixin_team.py\n+++ b/gittip/models/_mixin_team.py\n@@ -7,6 +7,7 @@\n \n class MemberLimitReached(Exception): pass\n \n+class StubParticipantAdded(Exception): pass\n \n class MixinTeam(object):\n \"\"\"This class provides methods for working with a Participant as a Team.\n@@ -39,6 +40,8 @@\n assert self.IS_PLURAL\n if len(self.get_takes()) == 149:\n raise MemberLimitReached\n+ if not member.is_claimed:\n+ raise StubParticipantAdded\n self.__set_take_for(member, Decimal('0.01'), self)\n \n def remove_member(self, member):\n", "issue": "preclude adding stub participants as team members\nReticketed from #2362.\n\npreclude adding stub participants as team members\nReticketed from #2362.\n\n", "before_files": [{"content": "\"\"\"Teams on Gittip are plural participants with members.\n\"\"\"\nfrom decimal import Decimal\n\nfrom aspen.utils import typecheck\n\n\nclass MemberLimitReached(Exception): pass\n\n\nclass MixinTeam(object):\n \"\"\"This class provides methods for working with a Participant as a Team.\n\n :param Participant participant: the underlying :py:class:`~gittip.participant.Participant` object for this team\n\n \"\"\"\n\n # XXX These were all written with the ORM and need to be converted.\n\n def __init__(self, participant):\n self.participant = participant\n\n def show_as_team(self, user):\n \"\"\"Return a boolean, whether to show this participant as a team.\n \"\"\"\n if not self.IS_PLURAL:\n return False\n if user.ADMIN:\n return True\n if not self.get_takes():\n if self == user.participant:\n return True\n return False\n return True\n\n def add_member(self, member):\n \"\"\"Add a member to this team.\n \"\"\"\n assert self.IS_PLURAL\n if len(self.get_takes()) == 149:\n raise MemberLimitReached\n self.__set_take_for(member, Decimal('0.01'), self)\n\n def remove_member(self, member):\n \"\"\"Remove a member from this team.\n \"\"\"\n assert self.IS_PLURAL\n self.__set_take_for(member, Decimal('0.00'), self)\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.IS_PLURAL\n for take in team.get_takes():\n if take['member'] == self.username:\n return True\n return False\n\n def get_take_last_week_for(self, member):\n \"\"\"What did the user actually take most recently? 
Used in throttling.\n \"\"\"\n assert self.IS_PLURAL\n membername = member.username if hasattr(member, 'username') \\\n else member['username']\n return self.db.one(\"\"\"\n\n SELECT amount\n FROM transfers\n WHERE tipper=%s AND tippee=%s\n AND timestamp >\n (SELECT ts_start FROM paydays ORDER BY ts_start DESC LIMIT 1)\n ORDER BY timestamp DESC LIMIT 1\n\n \"\"\", (self.username, membername), default=Decimal('0.00'))\n\n def get_take_for(self, member):\n \"\"\"Return a Decimal representation of the take for this member, or 0.\n \"\"\"\n assert self.IS_PLURAL\n return self.db.one( \"SELECT amount FROM current_takes \"\n \"WHERE member=%s AND team=%s\"\n , (member.username, self.username)\n , default=Decimal('0.00')\n )\n\n def compute_max_this_week(self, last_week):\n \"\"\"2x last week's take, but at least a dollar.\n \"\"\"\n return max(last_week * Decimal('2'), Decimal('1.00'))\n\n def set_take_for(self, member, take, recorder):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.IS_PLURAL\n\n # lazy import to avoid circular import\n from gittip.security.user import User\n from gittip.models.participant import Participant\n\n typecheck( member, Participant\n , take, Decimal\n , recorder, (Participant, User)\n )\n\n last_week = self.get_take_last_week_for(member)\n max_this_week = self.compute_max_this_week(last_week)\n if take > max_this_week:\n take = max_this_week\n\n self.__set_take_for(member, take, recorder)\n return take\n\n def __set_take_for(self, member, amount, recorder):\n assert self.IS_PLURAL\n # XXX Factored out for testing purposes only! :O Use .set_take_for.\n self.db.run(\"\"\"\n\n INSERT INTO takes (ctime, member, team, amount, recorder)\n VALUES ( COALESCE (( SELECT ctime\n FROM takes\n WHERE member=%s\n AND team=%s\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %s\n , %s\n , %s\n , %s\n )\n\n \"\"\", (member.username, self.username, member.username, self.username, \\\n amount, recorder.username))\n\n def get_takes(self, for_payday=False):\n \"\"\"Return a list of member takes for a team.\n\n This is implemented parallel to Participant.get_tips_and_total. 
See\n over there for an explanation of for_payday.\n\n \"\"\"\n assert self.IS_PLURAL\n\n args = dict(team=self.username)\n\n if for_payday:\n args['ts_start'] = for_payday\n\n # Get the takes for this team, as they were before ts_start,\n # filtering out the ones we've already transferred (in case payday\n # is interrupted and restarted).\n\n TAKES = \"\"\"\\\n\n SELECT * FROM (\n SELECT DISTINCT ON (member) t.*\n FROM takes t\n JOIN participants p ON p.username = member\n WHERE team=%(team)s\n AND mtime < %(ts_start)s\n AND p.is_suspicious IS NOT true\n AND ( SELECT id\n FROM transfers\n WHERE tipper=t.team\n AND tippee=t.member\n AND as_team_member IS true\n AND timestamp >= %(ts_start)s\n ) IS NULL\n ORDER BY member, mtime DESC\n ) AS foo\n ORDER BY ctime DESC\n\n \"\"\"\n else:\n TAKES = \"\"\"\\\n\n SELECT member, amount, ctime, mtime\n FROM current_takes\n WHERE team=%(team)s\n ORDER BY ctime DESC\n\n \"\"\"\n\n return self.db.all(TAKES, args, back_as=dict)\n\n def get_team_take(self):\n \"\"\"Return a single take for a team, the team itself's take.\n \"\"\"\n assert self.IS_PLURAL\n TAKE = \"SELECT sum(amount) FROM current_takes WHERE team=%s\"\n total_take = self.db.one(TAKE, (self.username,), default=0)\n team_take = max(self.get_dollars_receiving() - total_take, 0)\n membership = { \"ctime\": None\n , \"mtime\": None\n , \"member\": self.username\n , \"amount\": team_take\n }\n return membership\n\n def get_members(self, current_participant):\n \"\"\"Return a list of member dicts.\n \"\"\"\n assert self.IS_PLURAL\n takes = self.get_takes()\n takes.append(self.get_team_take())\n budget = balance = self.get_dollars_receiving()\n members = []\n for take in takes:\n member = {}\n member['username'] = take['member']\n member['take'] = take['amount']\n\n member['removal_allowed'] = current_participant == self\n member['editing_allowed'] = False\n member['is_current_user'] = False\n if current_participant is not None:\n if member['username'] == current_participant.username:\n member['is_current_user'] = True\n if take['ctime'] is not None:\n # current user, but not the team itself\n member['editing_allowed']= True\n\n member['last_week'] = last_week = self.get_take_last_week_for(member)\n member['max_this_week'] = self.compute_max_this_week(last_week)\n amount = min(take['amount'], balance)\n balance -= amount\n member['balance'] = balance\n member['percentage'] = (amount / budget) if budget > 0 else 0\n members.append(member)\n return members\n", "path": "gittip/models/_mixin_team.py"}]} | 2,811 | 178 |
gh_patches_debug_24835 | rasdani/github-patches | git_diff | napari__napari-589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
novel gene discovery in Napari (a.k.a. imperfect layer name incrementing)
## 🐛 Bug
napari does a smart thing and adds an integer to the name of a layer if the name is already in use in the viewer. It then increments that number when another layer is added that fits the pattern. This is great until you have layer names that end in numbers

which in this case results in different names that are sometimes real genes and sometimes not.
## To Reproduce
Steps to reproduce the behavior:
```
v = napari.Viewer()
for i in range(3):
v.add_points(1000*np.random.rand(10,2), name = "no_problem")
for i in range(3):
v.add_points(1000*np.random.rand(10,2), name = "problem:GAD1")
```
## Expected behavior
consistent use of a space or other character to separate the auto-increment integer from the original string
</issue>
<code>
[start of napari/util/naming.py]
1 """Automatically generate names.
2 """
3 import re
4 from .misc import formatdoc
5
6
7 sep = ' '
8 start = 1
9
10 numbered_patt = re.compile(r'(?<!\d)(?:\d+|)$')
11
12
13 def _inc_name_count_sub(match):
14 count = match.group(0)
15
16 try:
17 count = int(count)
18 except ValueError: # not an int
19 count = f'{sep}{start}'
20 else:
21 count = f'{count + 1}'
22
23 return count
24
25
26 @formatdoc
27 def inc_name_count(name):
28 """Increase a name's count matching `{numbered_patt}` by ``1``.
29
30 If the name is not already numbered, append '{sep}{start}'.
31
32 Parameters
33 ----------
34 name : str
35 Original name.
36
37 Returns
38 -------
39 incremented_name : str
40 Numbered name incremented by ``1``.
41 """
42 return numbered_patt.sub(_inc_name_count_sub, name)
43
[end of napari/util/naming.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/util/naming.py b/napari/util/naming.py
--- a/napari/util/naming.py
+++ b/napari/util/naming.py
@@ -7,7 +7,9 @@
sep = ' '
start = 1
-numbered_patt = re.compile(r'(?<!\d)(?:\d+|)$')
+# Match integer between square brackets at end of string if after space
+# or at beginning of string or just match end of string
+numbered_patt = re.compile(r'((?<=\A\[)|(?<=\s\[))(?:\d+|)(?=\]$)|$')
def _inc_name_count_sub(match):
@@ -16,7 +18,7 @@
try:
count = int(count)
except ValueError: # not an int
- count = f'{sep}{start}'
+ count = f'{sep}[{start}]'
else:
count = f'{count + 1}'
@@ -27,7 +29,7 @@
def inc_name_count(name):
"""Increase a name's count matching `{numbered_patt}` by ``1``.
- If the name is not already numbered, append '{sep}{start}'.
+ If the name is not already numbered, append '{sep}[{start}]'.
Parameters
----------
@@ -39,4 +41,4 @@
incremented_name : str
Numbered name incremented by ``1``.
"""
- return numbered_patt.sub(_inc_name_count_sub, name)
+ return numbered_patt.sub(_inc_name_count_sub, name, count=1)
| {"golden_diff": "diff --git a/napari/util/naming.py b/napari/util/naming.py\n--- a/napari/util/naming.py\n+++ b/napari/util/naming.py\n@@ -7,7 +7,9 @@\n sep = ' '\n start = 1\n \n-numbered_patt = re.compile(r'(?<!\\d)(?:\\d+|)$')\n+# Match integer between square brackets at end of string if after space\n+# or at beginning of string or just match end of string\n+numbered_patt = re.compile(r'((?<=\\A\\[)|(?<=\\s\\[))(?:\\d+|)(?=\\]$)|$')\n \n \n def _inc_name_count_sub(match):\n@@ -16,7 +18,7 @@\n try:\n count = int(count)\n except ValueError: # not an int\n- count = f'{sep}{start}'\n+ count = f'{sep}[{start}]'\n else:\n count = f'{count + 1}'\n \n@@ -27,7 +29,7 @@\n def inc_name_count(name):\n \"\"\"Increase a name's count matching `{numbered_patt}` by ``1``.\n \n- If the name is not already numbered, append '{sep}{start}'.\n+ If the name is not already numbered, append '{sep}[{start}]'.\n \n Parameters\n ----------\n@@ -39,4 +41,4 @@\n incremented_name : str\n Numbered name incremented by ``1``.\n \"\"\"\n- return numbered_patt.sub(_inc_name_count_sub, name)\n+ return numbered_patt.sub(_inc_name_count_sub, name, count=1)\n", "issue": "novel gene discovery in Napari (a.k.a. imperfect layer name incrementing)\n## \ud83d\udc1b Bug\r\nnapari does a smart thing and adds an integer to the name of a layer if the name is already in use in the viewer. It then increments that number when another layer is added that fits the pattern. This is great until you have layer names that end in numbers \r\n\r\nwhich in this case results in different names that are sometimes real genes and sometimes not.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n```\r\nv = napari.Viewer()\r\nfor i in range(3):\r\n v.add_points(1000*np.random.rand(10,2), name = \"no_problem\")\r\nfor i in range(3):\r\n v.add_points(1000*np.random.rand(10,2), name = \"problem:GAD1\")\r\n```\r\n\r\n## Expected behavior\r\nconsistent use of a space or other character to separate the auto-increment integer from the original string\r\n\n", "before_files": [{"content": "\"\"\"Automatically generate names.\n\"\"\"\nimport re\nfrom .misc import formatdoc\n\n\nsep = ' '\nstart = 1\n\nnumbered_patt = re.compile(r'(?<!\\d)(?:\\d+|)$')\n\n\ndef _inc_name_count_sub(match):\n count = match.group(0)\n\n try:\n count = int(count)\n except ValueError: # not an int\n count = f'{sep}{start}'\n else:\n count = f'{count + 1}'\n\n return count\n\n\n@formatdoc\ndef inc_name_count(name):\n \"\"\"Increase a name's count matching `{numbered_patt}` by ``1``.\n\n If the name is not already numbered, append '{sep}{start}'.\n\n Parameters\n ----------\n name : str\n Original name.\n\n Returns\n -------\n incremented_name : str\n Numbered name incremented by ``1``.\n \"\"\"\n return numbered_patt.sub(_inc_name_count_sub, name)\n", "path": "napari/util/naming.py"}]} | 1,120 | 361 |
gh_patches_debug_1164 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1907 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
German language is not appropriately used when using ConfirmPrompts
### The Issue
I am building a chatbot for German users. I am sending the locale "de-de" as the user, and can confirm this actually arrives at the bot. When I want to use ConfirmPrompts, the bot returns "Yes" and "No" and not "Ja" / "Nein".
### The Solution
After a lot of digging, I found the underlying cause and a fix. The culture model does not actually recognize German (de-de) as a supported language, and thus switches to the default (English). But in prompt_culture_models.py German actually exists, and there is a todo "# TODO: Replace with Culture.German after Recognizers-Text package updates.", which I looked up: the Recognizers-Text package is already updated :) . Still, this is not the real issue.
The real reason is that German is not listed in the supported cultures function. I simply added it and everything works fine.
```python
    @classmethod
    def get_supported_cultures(cls) -> List[PromptCultureModel]:
        """
        Gets a list of the supported culture models.
        """
        return [
            cls.Chinese,
            cls.German,
            cls.Dutch,
            cls.English,
            cls.French,
            cls.Italian,
            cls.Japanese,
            cls.Korean,
            cls.Portuguese,
            cls.Spanish,
            cls.Turkish,
        ]
```
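With German included there, the locale lookup behaves as expected. A small sanity check (assuming `PromptCultureModels` is importable from `botbuilder.dialogs.prompts`):
```python
from botbuilder.dialogs.prompts import PromptCultureModels

assert "de-de" in PromptCultureModels._get_supported_locales()
assert PromptCultureModels.map_to_nearest_language("de-DE") == "de-de"
```
and the confirm prompt then answers with "Ja" / "Nein" for German users.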
</issue>
<code>
[start of libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import List
5
6 from recognizers_text import Culture
7
8
9 class PromptCultureModel:
10 """
11 Culture model used in Choice and Confirm Prompts.
12 """
13
14 def __init__(
15 self,
16 locale: str,
17 separator: str,
18 inline_or: str,
19 inline_or_more: str,
20 yes_in_language: str,
21 no_in_language: str,
22 ):
23 """
24
25 :param locale: Culture Model's Locale. Example: "en-US".
26 :param separator: Culture Model's Inline Separator. Example: ", ".
27 :param inline_or: Culture Model's Inline Or. Example: " or ".
28 :param inline_or_more Culture Model's Inline Or More. Example: ", or ".
29 :param yes_in_language: Equivalent of "Yes" in Culture Model's Language. Example: "Yes".
30 :param no_in_language: Equivalent of "No" in Culture Model's Language. Example: "No".
31 """
32 self.locale = locale
33 self.separator = separator
34 self.inline_or = inline_or
35 self.inline_or_more = inline_or_more
36 self.yes_in_language = yes_in_language
37 self.no_in_language = no_in_language
38
39
40 class PromptCultureModels:
41 """
42 Class container for currently-supported Culture Models in Confirm and Choice Prompt.
43 """
44
45 Chinese = PromptCultureModel(
46 locale=Culture.Chinese,
47 inline_or=" 要么 ",
48 inline_or_more=", 要么 ",
49 separator=", ",
50 no_in_language="不",
51 yes_in_language="是的",
52 )
53
54 Dutch = PromptCultureModel(
55 locale=Culture.Dutch,
56 inline_or=" of ",
57 inline_or_more=", of ",
58 separator=", ",
59 no_in_language="Nee",
60 yes_in_language="Ja",
61 )
62
63 English = PromptCultureModel(
64 locale=Culture.English,
65 inline_or=" or ",
66 inline_or_more=", or ",
67 separator=", ",
68 no_in_language="No",
69 yes_in_language="Yes",
70 )
71
72 French = PromptCultureModel(
73 locale=Culture.French,
74 inline_or=" ou ",
75 inline_or_more=", ou ",
76 separator=", ",
77 no_in_language="Non",
78 yes_in_language="Oui",
79 )
80
81 German = PromptCultureModel(
82 # TODO: Replace with Culture.German after Recognizers-Text package updates.
83 locale="de-de",
84 inline_or=" oder ",
85 inline_or_more=", oder ",
86 separator=", ",
87 no_in_language="Nein",
88 yes_in_language="Ja",
89 )
90
91 Italian = PromptCultureModel(
92 locale=Culture.Italian,
93 inline_or=" o ",
94 inline_or_more=" o ",
95 separator=", ",
96 no_in_language="No",
97 yes_in_language="Si",
98 )
99
100 Japanese = PromptCultureModel(
101 locale=Culture.Japanese,
102 inline_or=" または ",
103 inline_or_more="、 または ",
104 separator="、 ",
105 no_in_language="いいえ",
106 yes_in_language="はい",
107 )
108
109 Korean = PromptCultureModel(
110 locale=Culture.Korean,
111 inline_or=" 또는 ",
112 inline_or_more=" 또는 ",
113 separator=", ",
114 no_in_language="아니",
115 yes_in_language="예",
116 )
117
118 Portuguese = PromptCultureModel(
119 locale=Culture.Portuguese,
120 inline_or=" ou ",
121 inline_or_more=", ou ",
122 separator=", ",
123 no_in_language="Não",
124 yes_in_language="Sim",
125 )
126
127 Spanish = PromptCultureModel(
128 locale=Culture.Spanish,
129 inline_or=" o ",
130 inline_or_more=", o ",
131 separator=", ",
132 no_in_language="No",
133 yes_in_language="Sí",
134 )
135
136 Turkish = PromptCultureModel(
137 locale=Culture.Turkish,
138 inline_or=" veya ",
139 inline_or_more=" veya ",
140 separator=", ",
141 no_in_language="Hayır",
142 yes_in_language="Evet",
143 )
144
145 @classmethod
146 def map_to_nearest_language(cls, culture_code: str) -> str:
147 """
148 Normalize various potential locale strings to a standard.
149 :param culture_code: Represents locale. Examples: "en-US, en-us, EN".
150 :return: Normalized locale.
151 :rtype: str
152
153 .. remarks::
154 In our other SDKs, this method is a copy/paste of the ones from the Recognizers-Text library.
155 However, that doesn't exist in Python.
156 """
157 if culture_code:
158 culture_code = culture_code.lower()
159 supported_culture_codes = cls._get_supported_locales()
160
161 if culture_code not in supported_culture_codes:
162 culture_prefix = culture_code.split("-")[0]
163
164 for supported_culture_code in supported_culture_codes:
165 if supported_culture_code.startswith(culture_prefix):
166 culture_code = supported_culture_code
167
168 return culture_code
169
170 @classmethod
171 def get_supported_cultures(cls) -> List[PromptCultureModel]:
172 """
173 Gets a list of the supported culture models.
174 """
175 return [
176 cls.Chinese,
177 cls.Dutch,
178 cls.English,
179 cls.French,
180 cls.Italian,
181 cls.Japanese,
182 cls.Korean,
183 cls.Portuguese,
184 cls.Spanish,
185 cls.Turkish,
186 ]
187
188 @classmethod
189 def _get_supported_locales(cls) -> List[str]:
190 return [c.locale for c in cls.get_supported_cultures()]
191
[end of libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py
@@ -174,6 +174,7 @@
"""
return [
cls.Chinese,
+ cls.German,
cls.Dutch,
cls.English,
cls.French,
| {"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py\n@@ -174,6 +174,7 @@\n \"\"\"\n return [\n cls.Chinese,\n+ cls.German,\n cls.Dutch,\n cls.English,\n cls.French,\n", "issue": "German language is not appropiate used when using Confirmprompts\n### The Issue\r\nI am building a chatbot for german users. I am sending the local \"de-de\" as user, and can confirm this actual arrives the bot. When i want to use Confirmprompts the bot returns Yes and No and not \"Ja\" \"Nein\". \r\n### The Solution\r\nAfter a lot of digging, I found the underlying cause and a fix. The culture model does not actually recognices German (de-de) as supported language, and thus switches to the default (english). But in the prompt_culture_models.py German actualy exists and ther is a todo \"# TODO: Replace with Culture.German after Recognizers-Text package updates.\" Which I looked up and the Recognizers-Text package sis already updated :) . Still this is not the real issue. \r\n\r\nThe reason is that german is not listed in the supported cultures function. I simply added it and every thing works fine. \r\n\r\n` @classmethod\r\n def get_supported_cultures(cls) -> List[PromptCultureModel]:\r\n \"\"\"\r\n Gets a list of the supported culture models.\r\n \"\"\"\r\n return [\r\n cls.Chinese,\r\n cls.German,\r\n cls.Dutch,\r\n cls.English,\r\n cls.French,\r\n cls.Italian,\r\n cls.Japanese,\r\n cls.Korean,\r\n cls.Portuguese,\r\n cls.Spanish,\r\n cls.Turkish,\r\n ]`\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List\n\nfrom recognizers_text import Culture\n\n\nclass PromptCultureModel:\n \"\"\"\n Culture model used in Choice and Confirm Prompts.\n \"\"\"\n\n def __init__(\n self,\n locale: str,\n separator: str,\n inline_or: str,\n inline_or_more: str,\n yes_in_language: str,\n no_in_language: str,\n ):\n \"\"\"\n\n :param locale: Culture Model's Locale. Example: \"en-US\".\n :param separator: Culture Model's Inline Separator. Example: \", \".\n :param inline_or: Culture Model's Inline Or. Example: \" or \".\n :param inline_or_more Culture Model's Inline Or More. Example: \", or \".\n :param yes_in_language: Equivalent of \"Yes\" in Culture Model's Language. Example: \"Yes\".\n :param no_in_language: Equivalent of \"No\" in Culture Model's Language. 
Example: \"No\".\n \"\"\"\n self.locale = locale\n self.separator = separator\n self.inline_or = inline_or\n self.inline_or_more = inline_or_more\n self.yes_in_language = yes_in_language\n self.no_in_language = no_in_language\n\n\nclass PromptCultureModels:\n \"\"\"\n Class container for currently-supported Culture Models in Confirm and Choice Prompt.\n \"\"\"\n\n Chinese = PromptCultureModel(\n locale=Culture.Chinese,\n inline_or=\" \u8981\u4e48 \",\n inline_or_more=\"\uff0c \u8981\u4e48 \",\n separator=\"\uff0c \",\n no_in_language=\"\u4e0d\",\n yes_in_language=\"\u662f\u7684\",\n )\n\n Dutch = PromptCultureModel(\n locale=Culture.Dutch,\n inline_or=\" of \",\n inline_or_more=\", of \",\n separator=\", \",\n no_in_language=\"Nee\",\n yes_in_language=\"Ja\",\n )\n\n English = PromptCultureModel(\n locale=Culture.English,\n inline_or=\" or \",\n inline_or_more=\", or \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"Yes\",\n )\n\n French = PromptCultureModel(\n locale=Culture.French,\n inline_or=\" ou \",\n inline_or_more=\", ou \",\n separator=\", \",\n no_in_language=\"Non\",\n yes_in_language=\"Oui\",\n )\n\n German = PromptCultureModel(\n # TODO: Replace with Culture.German after Recognizers-Text package updates.\n locale=\"de-de\",\n inline_or=\" oder \",\n inline_or_more=\", oder \",\n separator=\", \",\n no_in_language=\"Nein\",\n yes_in_language=\"Ja\",\n )\n\n Italian = PromptCultureModel(\n locale=Culture.Italian,\n inline_or=\" o \",\n inline_or_more=\" o \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"Si\",\n )\n\n Japanese = PromptCultureModel(\n locale=Culture.Japanese,\n inline_or=\" \u307e\u305f\u306f \",\n inline_or_more=\"\u3001 \u307e\u305f\u306f \",\n separator=\"\u3001 \",\n no_in_language=\"\u3044\u3044\u3048\",\n yes_in_language=\"\u306f\u3044\",\n )\n\n Korean = PromptCultureModel(\n locale=Culture.Korean,\n inline_or=\" \ub610\ub294 \",\n inline_or_more=\" \ub610\ub294 \",\n separator=\", \",\n no_in_language=\"\uc544\ub2c8\",\n yes_in_language=\"\uc608\",\n )\n\n Portuguese = PromptCultureModel(\n locale=Culture.Portuguese,\n inline_or=\" ou \",\n inline_or_more=\", ou \",\n separator=\", \",\n no_in_language=\"N\u00e3o\",\n yes_in_language=\"Sim\",\n )\n\n Spanish = PromptCultureModel(\n locale=Culture.Spanish,\n inline_or=\" o \",\n inline_or_more=\", o \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"S\u00ed\",\n )\n\n Turkish = PromptCultureModel(\n locale=Culture.Turkish,\n inline_or=\" veya \",\n inline_or_more=\" veya \",\n separator=\", \",\n no_in_language=\"Hay\u0131r\",\n yes_in_language=\"Evet\",\n )\n\n @classmethod\n def map_to_nearest_language(cls, culture_code: str) -> str:\n \"\"\"\n Normalize various potential locale strings to a standard.\n :param culture_code: Represents locale. Examples: \"en-US, en-us, EN\".\n :return: Normalized locale.\n :rtype: str\n\n .. 
remarks::\n In our other SDKs, this method is a copy/paste of the ones from the Recognizers-Text library.\n However, that doesn't exist in Python.\n \"\"\"\n if culture_code:\n culture_code = culture_code.lower()\n supported_culture_codes = cls._get_supported_locales()\n\n if culture_code not in supported_culture_codes:\n culture_prefix = culture_code.split(\"-\")[0]\n\n for supported_culture_code in supported_culture_codes:\n if supported_culture_code.startswith(culture_prefix):\n culture_code = supported_culture_code\n\n return culture_code\n\n @classmethod\n def get_supported_cultures(cls) -> List[PromptCultureModel]:\n \"\"\"\n Gets a list of the supported culture models.\n \"\"\"\n return [\n cls.Chinese,\n cls.Dutch,\n cls.English,\n cls.French,\n cls.Italian,\n cls.Japanese,\n cls.Korean,\n cls.Portuguese,\n cls.Spanish,\n cls.Turkish,\n ]\n\n @classmethod\n def _get_supported_locales(cls) -> List[str]:\n return [c.locale for c in cls.get_supported_cultures()]\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py"}]} | 2,519 | 137 |
gh_patches_debug_13137 | rasdani/github-patches | git_diff | genialis__resolwe-313 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
404 returned when deleting entity with `delete_content` set to `true`
Probably because the sample gets deleted when its last data object is deleted.
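If that is the cause, a minimal guard (sketched with the `Entity`, `Response` and `status` names that `resolwe/flow/views/entity.py` already imports; not necessarily the final fix) would be to re-check that the entity still exists before delegating to the parent `destroy`:
```python
# inside EntityViewSet.destroy, right after the loop that deletes Data objects
if not Entity.objects.filter(pk=obj.pk).exists():
    # deleting the last Data object already removed the entity, so calling
    # the parent destroy would look it up again and return 404
    return Response(status=status.HTTP_204_NO_CONTENT)
```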
</issue>
<code>
[start of resolwe/flow/views/entity.py]
1 """Entity viewset."""
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from distutils.util import strtobool # pylint: disable=import-error,no-name-in-module
5
6 from django.db.models import Max
7 from django.db.models.query import Prefetch
8
9 from rest_framework import exceptions, status
10 from rest_framework.decorators import detail_route
11 from rest_framework.response import Response
12
13 from resolwe.flow.filters import EntityFilter
14 from resolwe.flow.models import Collection, Data, Entity
15 from resolwe.flow.serializers import EntitySerializer
16 from resolwe.permissions.utils import remove_permission, update_permission
17
18 from .collection import CollectionViewSet
19
20
21 class EntityViewSet(CollectionViewSet):
22 """API view for entities."""
23
24 filter_class = EntityFilter
25 serializer_class = EntitySerializer
26
27 queryset = Entity.objects.prefetch_related(
28 Prefetch('data', queryset=Data.objects.all().order_by('id')),
29 'descriptor_schema',
30 'contributor'
31 ).annotate(
32 latest_date=Max('data__modified')
33 ).order_by('-latest_date')
34
35 def _check_collection_permissions(self, collection_id, user):
36 """Check that collection exists and user has `add` permission."""
37 collection_query = Collection.objects.filter(pk=collection_id)
38 if not collection_query.exists():
39 raise exceptions.ValidationError('Collection id does not exist')
40
41 collection = collection_query.first()
42 if not user.has_perm('add_collection', obj=collection):
43 if user.is_authenticated():
44 raise exceptions.PermissionDenied()
45 else:
46 raise exceptions.NotFound()
47
48 def set_content_permissions(self, user, obj, payload):
49 """Apply permissions to data objects in ``Entity``."""
50 # Data doesn't have "ADD" permission, so it has to be removed
51 payload = remove_permission(payload, 'add')
52
53 for data in obj.data.all():
54 if user.has_perm('share_data', data):
55 update_permission(data, payload)
56
57 def destroy(self, request, *args, **kwargs):
58 """Destroy a model instance.
59
60 If ``delete_content`` flag is set in query parameters, also all
61 Data objects contained in entity will be deleted.
62 """
63 obj = self.get_object()
64 user = request.user
65
66 if strtobool(request.query_params.get('delete_content', 'false')):
67 for data in obj.data.all():
68 if user.has_perm('edit_data', data):
69 data.delete()
70
71 # NOTE: Collection's ``destroy`` method should be skiped, so we
72 # intentionaly call it's parent.
73 return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call
74 request, *args, **kwargs
75 )
76
77 @detail_route(methods=[u'post'])
78 def add_to_collection(self, request, pk=None):
79 """Add Entity to a collection."""
80 entity = self.get_object()
81
82 if 'ids' not in request.data:
83 return Response({"error": "`ids` parameter is required"}, status=status.HTTP_400_BAD_REQUEST)
84
85 for collection_id in request.data['ids']:
86 self._check_collection_permissions(collection_id, request.user)
87
88 for collection_id in request.data['ids']:
89 entity.collections.add(collection_id)
90
91 collection = Collection.objects.get(pk=collection_id)
92 for data in entity.data.all():
93 collection.data.add(data)
94
95 return Response()
96
97 @detail_route(methods=[u'post'])
98 def remove_from_collection(self, request, pk=None):
99 """Remove Entity from a collection."""
100 entity = self.get_object()
101
102 if 'ids' not in request.data:
103 return Response({"error": "`ids` parameter is required"}, status=status.HTTP_400_BAD_REQUEST)
104
105 for collection_id in request.data['ids']:
106 self._check_collection_permissions(collection_id, request.user)
107
108 for collection_id in request.data['ids']:
109 entity.collections.remove(collection_id)
110
111 collection = Collection.objects.get(pk=collection_id)
112 for data in entity.data.all():
113 collection.data.remove(data)
114
115 return Response()
116
117 @detail_route(methods=[u'post'])
118 def add_data(self, request, pk=None):
119 """Add data to Entity and it's collection."""
120 # add data to entity
121 resp = super(EntityViewSet, self).add_data(request, pk)
122
123 # add data to collections in which entity is
124 entity = self.get_object()
125 for collection in entity.collections.all():
126 collection.data.add(*request.data['ids'])
127
128 return resp
129
[end of resolwe/flow/views/entity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/resolwe/flow/views/entity.py b/resolwe/flow/views/entity.py
--- a/resolwe/flow/views/entity.py
+++ b/resolwe/flow/views/entity.py
@@ -68,6 +68,11 @@
if user.has_perm('edit_data', data):
data.delete()
+ # If all data objects in an entity are removed, the entity may
+ # have already been removed, so there is no need to call destroy.
+ if not Entity.objects.filter(pk=obj.pk).exists():
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
# NOTE: Collection's ``destroy`` method should be skiped, so we
# intentionaly call it's parent.
return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call
| {"golden_diff": "diff --git a/resolwe/flow/views/entity.py b/resolwe/flow/views/entity.py\n--- a/resolwe/flow/views/entity.py\n+++ b/resolwe/flow/views/entity.py\n@@ -68,6 +68,11 @@\n if user.has_perm('edit_data', data):\n data.delete()\n \n+ # If all data objects in an entity are removed, the entity may\n+ # have already been removed, so there is no need to call destroy.\n+ if not Entity.objects.filter(pk=obj.pk).exists():\n+ return Response(status=status.HTTP_204_NO_CONTENT)\n+\n # NOTE: Collection's ``destroy`` method should be skiped, so we\n # intentionaly call it's parent.\n return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call\n", "issue": "404 returned when deleting entity with `delete_content` set to `true`\nProbably because sample gets deleted when the last data object is deleted.\n", "before_files": [{"content": "\"\"\"Entity viewset.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom distutils.util import strtobool # pylint: disable=import-error,no-name-in-module\n\nfrom django.db.models import Max\nfrom django.db.models.query import Prefetch\n\nfrom rest_framework import exceptions, status\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom resolwe.flow.filters import EntityFilter\nfrom resolwe.flow.models import Collection, Data, Entity\nfrom resolwe.flow.serializers import EntitySerializer\nfrom resolwe.permissions.utils import remove_permission, update_permission\n\nfrom .collection import CollectionViewSet\n\n\nclass EntityViewSet(CollectionViewSet):\n \"\"\"API view for entities.\"\"\"\n\n filter_class = EntityFilter\n serializer_class = EntitySerializer\n\n queryset = Entity.objects.prefetch_related(\n Prefetch('data', queryset=Data.objects.all().order_by('id')),\n 'descriptor_schema',\n 'contributor'\n ).annotate(\n latest_date=Max('data__modified')\n ).order_by('-latest_date')\n\n def _check_collection_permissions(self, collection_id, user):\n \"\"\"Check that collection exists and user has `add` permission.\"\"\"\n collection_query = Collection.objects.filter(pk=collection_id)\n if not collection_query.exists():\n raise exceptions.ValidationError('Collection id does not exist')\n\n collection = collection_query.first()\n if not user.has_perm('add_collection', obj=collection):\n if user.is_authenticated():\n raise exceptions.PermissionDenied()\n else:\n raise exceptions.NotFound()\n\n def set_content_permissions(self, user, obj, payload):\n \"\"\"Apply permissions to data objects in ``Entity``.\"\"\"\n # Data doesn't have \"ADD\" permission, so it has to be removed\n payload = remove_permission(payload, 'add')\n\n for data in obj.data.all():\n if user.has_perm('share_data', data):\n update_permission(data, payload)\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"Destroy a model instance.\n\n If ``delete_content`` flag is set in query parameters, also all\n Data objects contained in entity will be deleted.\n \"\"\"\n obj = self.get_object()\n user = request.user\n\n if strtobool(request.query_params.get('delete_content', 'false')):\n for data in obj.data.all():\n if user.has_perm('edit_data', data):\n data.delete()\n\n # NOTE: Collection's ``destroy`` method should be skiped, so we\n # intentionaly call it's parent.\n return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call\n request, *args, **kwargs\n )\n\n @detail_route(methods=[u'post'])\n def add_to_collection(self, request, pk=None):\n 
\"\"\"Add Entity to a collection.\"\"\"\n entity = self.get_object()\n\n if 'ids' not in request.data:\n return Response({\"error\": \"`ids` parameter is required\"}, status=status.HTTP_400_BAD_REQUEST)\n\n for collection_id in request.data['ids']:\n self._check_collection_permissions(collection_id, request.user)\n\n for collection_id in request.data['ids']:\n entity.collections.add(collection_id)\n\n collection = Collection.objects.get(pk=collection_id)\n for data in entity.data.all():\n collection.data.add(data)\n\n return Response()\n\n @detail_route(methods=[u'post'])\n def remove_from_collection(self, request, pk=None):\n \"\"\"Remove Entity from a collection.\"\"\"\n entity = self.get_object()\n\n if 'ids' not in request.data:\n return Response({\"error\": \"`ids` parameter is required\"}, status=status.HTTP_400_BAD_REQUEST)\n\n for collection_id in request.data['ids']:\n self._check_collection_permissions(collection_id, request.user)\n\n for collection_id in request.data['ids']:\n entity.collections.remove(collection_id)\n\n collection = Collection.objects.get(pk=collection_id)\n for data in entity.data.all():\n collection.data.remove(data)\n\n return Response()\n\n @detail_route(methods=[u'post'])\n def add_data(self, request, pk=None):\n \"\"\"Add data to Entity and it's collection.\"\"\"\n # add data to entity\n resp = super(EntityViewSet, self).add_data(request, pk)\n\n # add data to collections in which entity is\n entity = self.get_object()\n for collection in entity.collections.all():\n collection.data.add(*request.data['ids'])\n\n return resp\n", "path": "resolwe/flow/views/entity.py"}]} | 1,810 | 185 |
gh_patches_debug_41642 | rasdani/github-patches | git_diff | pytorch__vision-3656 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Avoid pandas dependency for CelebA dataset
The CelebA dataset has a runtime dependency on `pandas`. Since all we need is `pandas.read_csv`, it would be interesting to see if we can replace `pandas.read_csv` with just the built-in `csv` module. The mergeability of the PR would depend on how ugly / complex the code becomes... :)
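For instance, the attribute file could be read along these lines (rough sketch only; `fn` stands for the same `partial(os.path.join, self.root, self.base_folder)` helper the class already builds, and the row handling mirrors the current `header=1` behaviour):
```python
import csv

import torch

with open(fn("list_attr_celeba.txt")) as f:
    rows = list(csv.reader(f, delimiter=" ", skipinitialspace=True))

attr_names = rows[1]                      # row 0 is the image count, row 1 the header
filenames = [row[0] for row in rows[2:]]  # first column holds the image file names
attr = torch.tensor([[int(v) for v in row[1:]] for row in rows[2:]])
```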
cc @pmeier
</issue>
<code>
[start of torchvision/datasets/celeba.py]
1 from functools import partial
2 import torch
3 import os
4 import PIL
5 from typing import Any, Callable, List, Optional, Union, Tuple
6 from .vision import VisionDataset
7 from .utils import download_file_from_google_drive, check_integrity, verify_str_arg
8
9
10 class CelebA(VisionDataset):
11 """`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.
12
13 Args:
14 root (string): Root directory where images are downloaded to.
15 split (string): One of {'train', 'valid', 'test', 'all'}.
16 Accordingly dataset is selected.
17 target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,
18 or ``landmarks``. Can also be a list to output a tuple with all specified target types.
19 The targets represent:
20
21 - ``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes
22 - ``identity`` (int): label for each person (data points with the same identity are the same person)
23 - ``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height)
24 - ``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
25 righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)
26
27 Defaults to ``attr``. If empty, ``None`` will be returned as target.
28
29 transform (callable, optional): A function/transform that takes in an PIL image
30 and returns a transformed version. E.g, ``transforms.ToTensor``
31 target_transform (callable, optional): A function/transform that takes in the
32 target and transforms it.
33 download (bool, optional): If true, downloads the dataset from the internet and
34 puts it in root directory. If dataset is already downloaded, it is not
35 downloaded again.
36 """
37
38 base_folder = "celeba"
39 # There currently does not appear to be a easy way to extract 7z in python (without introducing additional
40 # dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available
41 # right now.
42 file_list = [
43 # File ID MD5 Hash Filename
44 ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
45 # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc","b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"),
46 # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"),
47 ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
48 ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
49 ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
50 ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
51 # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"),
52 ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
53 ]
54
55 def __init__(
56 self,
57 root: str,
58 split: str = "train",
59 target_type: Union[List[str], str] = "attr",
60 transform: Optional[Callable] = None,
61 target_transform: Optional[Callable] = None,
62 download: bool = False,
63 ) -> None:
64 import pandas
65 super(CelebA, self).__init__(root, transform=transform,
66 target_transform=target_transform)
67 self.split = split
68 if isinstance(target_type, list):
69 self.target_type = target_type
70 else:
71 self.target_type = [target_type]
72
73 if not self.target_type and self.target_transform is not None:
74 raise RuntimeError('target_transform is specified but target_type is empty')
75
76 if download:
77 self.download()
78
79 if not self._check_integrity():
80 raise RuntimeError('Dataset not found or corrupted.' +
81 ' You can use download=True to download it')
82
83 split_map = {
84 "train": 0,
85 "valid": 1,
86 "test": 2,
87 "all": None,
88 }
89 split_ = split_map[verify_str_arg(split.lower(), "split",
90 ("train", "valid", "test", "all"))]
91
92 fn = partial(os.path.join, self.root, self.base_folder)
93 splits = pandas.read_csv(fn("list_eval_partition.txt"), delim_whitespace=True, header=None, index_col=0)
94 identity = pandas.read_csv(fn("identity_CelebA.txt"), delim_whitespace=True, header=None, index_col=0)
95 bbox = pandas.read_csv(fn("list_bbox_celeba.txt"), delim_whitespace=True, header=1, index_col=0)
96 landmarks_align = pandas.read_csv(fn("list_landmarks_align_celeba.txt"), delim_whitespace=True, header=1)
97 attr = pandas.read_csv(fn("list_attr_celeba.txt"), delim_whitespace=True, header=1)
98
99 mask = slice(None) if split_ is None else (splits[1] == split_)
100
101 self.filename = splits[mask].index.values
102 self.identity = torch.as_tensor(identity[mask].values)
103 self.bbox = torch.as_tensor(bbox[mask].values)
104 self.landmarks_align = torch.as_tensor(landmarks_align[mask].values)
105 self.attr = torch.as_tensor(attr[mask].values)
106 self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}
107 self.attr_names = list(attr.columns)
108
109 def _check_integrity(self) -> bool:
110 for (_, md5, filename) in self.file_list:
111 fpath = os.path.join(self.root, self.base_folder, filename)
112 _, ext = os.path.splitext(filename)
113 # Allow original archive to be deleted (zip and 7z)
114 # Only need the extracted images
115 if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
116 return False
117
118 # Should check a hash of the images
119 return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))
120
121 def download(self) -> None:
122 import zipfile
123
124 if self._check_integrity():
125 print('Files already downloaded and verified')
126 return
127
128 for (file_id, md5, filename) in self.file_list:
129 download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)
130
131 with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f:
132 f.extractall(os.path.join(self.root, self.base_folder))
133
134 def __getitem__(self, index: int) -> Tuple[Any, Any]:
135 X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))
136
137 target: Any = []
138 for t in self.target_type:
139 if t == "attr":
140 target.append(self.attr[index, :])
141 elif t == "identity":
142 target.append(self.identity[index, 0])
143 elif t == "bbox":
144 target.append(self.bbox[index, :])
145 elif t == "landmarks":
146 target.append(self.landmarks_align[index, :])
147 else:
148 # TODO: refactor with utils.verify_str_arg
149 raise ValueError("Target type \"{}\" is not recognized.".format(t))
150
151 if self.transform is not None:
152 X = self.transform(X)
153
154 if target:
155 target = tuple(target) if len(target) > 1 else target[0]
156
157 if self.target_transform is not None:
158 target = self.target_transform(target)
159 else:
160 target = None
161
162 return X, target
163
164 def __len__(self) -> int:
165 return len(self.attr)
166
167 def extra_repr(self) -> str:
168 lines = ["Target type: {target_type}", "Split: {split}"]
169 return '\n'.join(lines).format(**self.__dict__)
170
[end of torchvision/datasets/celeba.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/datasets/celeba.py b/torchvision/datasets/celeba.py
--- a/torchvision/datasets/celeba.py
+++ b/torchvision/datasets/celeba.py
@@ -1,3 +1,5 @@
+from collections import namedtuple
+import csv
from functools import partial
import torch
import os
@@ -6,6 +8,8 @@
from .vision import VisionDataset
from .utils import download_file_from_google_drive, check_integrity, verify_str_arg
+CSV = namedtuple("CSV", ["header", "index", "data"])
+
class CelebA(VisionDataset):
"""`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.
@@ -61,7 +65,6 @@
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
- import pandas
super(CelebA, self).__init__(root, transform=transform,
target_transform=target_transform)
self.split = split
@@ -88,23 +91,42 @@
}
split_ = split_map[verify_str_arg(split.lower(), "split",
("train", "valid", "test", "all"))]
+ splits = self._load_csv("list_eval_partition.txt")
+ identity = self._load_csv("identity_CelebA.txt")
+ bbox = self._load_csv("list_bbox_celeba.txt", header=1)
+ landmarks_align = self._load_csv("list_landmarks_align_celeba.txt", header=1)
+ attr = self._load_csv("list_attr_celeba.txt", header=1)
+
+ mask = slice(None) if split_ is None else (splits.data == split_).squeeze()
+
+ self.filename = splits.index
+ self.identity = identity.data[mask]
+ self.bbox = bbox.data[mask]
+ self.landmarks_align = landmarks_align.data[mask]
+ self.attr = attr.data[mask]
+ self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}
+ self.attr_names = attr.header
+
+ def _load_csv(
+ self,
+ filename: str,
+ header: Optional[int] = None,
+ ) -> CSV:
+ data, indices, headers = [], [], []
fn = partial(os.path.join, self.root, self.base_folder)
- splits = pandas.read_csv(fn("list_eval_partition.txt"), delim_whitespace=True, header=None, index_col=0)
- identity = pandas.read_csv(fn("identity_CelebA.txt"), delim_whitespace=True, header=None, index_col=0)
- bbox = pandas.read_csv(fn("list_bbox_celeba.txt"), delim_whitespace=True, header=1, index_col=0)
- landmarks_align = pandas.read_csv(fn("list_landmarks_align_celeba.txt"), delim_whitespace=True, header=1)
- attr = pandas.read_csv(fn("list_attr_celeba.txt"), delim_whitespace=True, header=1)
-
- mask = slice(None) if split_ is None else (splits[1] == split_)
-
- self.filename = splits[mask].index.values
- self.identity = torch.as_tensor(identity[mask].values)
- self.bbox = torch.as_tensor(bbox[mask].values)
- self.landmarks_align = torch.as_tensor(landmarks_align[mask].values)
- self.attr = torch.as_tensor(attr[mask].values)
- self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}
- self.attr_names = list(attr.columns)
+ with open(fn(filename)) as csv_file:
+ data = list(csv.reader(csv_file, delimiter=' ', skipinitialspace=True))
+
+ if header is not None:
+ headers = data[header]
+ data = data[header + 1:]
+
+ indices = [row[0] for row in data]
+ data = [row[1:] for row in data]
+ data_int = [list(map(int, i)) for i in data]
+
+ return CSV(headers, indices, torch.tensor(data_int))
def _check_integrity(self) -> bool:
for (_, md5, filename) in self.file_list:
| {"golden_diff": "diff --git a/torchvision/datasets/celeba.py b/torchvision/datasets/celeba.py\n--- a/torchvision/datasets/celeba.py\n+++ b/torchvision/datasets/celeba.py\n@@ -1,3 +1,5 @@\n+from collections import namedtuple\n+import csv\n from functools import partial\n import torch\n import os\n@@ -6,6 +8,8 @@\n from .vision import VisionDataset\n from .utils import download_file_from_google_drive, check_integrity, verify_str_arg\n \n+CSV = namedtuple(\"CSV\", [\"header\", \"index\", \"data\"])\n+\n \n class CelebA(VisionDataset):\n \"\"\"`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.\n@@ -61,7 +65,6 @@\n target_transform: Optional[Callable] = None,\n download: bool = False,\n ) -> None:\n- import pandas\n super(CelebA, self).__init__(root, transform=transform,\n target_transform=target_transform)\n self.split = split\n@@ -88,23 +91,42 @@\n }\n split_ = split_map[verify_str_arg(split.lower(), \"split\",\n (\"train\", \"valid\", \"test\", \"all\"))]\n+ splits = self._load_csv(\"list_eval_partition.txt\")\n+ identity = self._load_csv(\"identity_CelebA.txt\")\n+ bbox = self._load_csv(\"list_bbox_celeba.txt\", header=1)\n+ landmarks_align = self._load_csv(\"list_landmarks_align_celeba.txt\", header=1)\n+ attr = self._load_csv(\"list_attr_celeba.txt\", header=1)\n+\n+ mask = slice(None) if split_ is None else (splits.data == split_).squeeze()\n+\n+ self.filename = splits.index\n+ self.identity = identity.data[mask]\n+ self.bbox = bbox.data[mask]\n+ self.landmarks_align = landmarks_align.data[mask]\n+ self.attr = attr.data[mask]\n+ self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}\n+ self.attr_names = attr.header\n+\n+ def _load_csv(\n+ self,\n+ filename: str,\n+ header: Optional[int] = None,\n+ ) -> CSV:\n+ data, indices, headers = [], [], []\n \n fn = partial(os.path.join, self.root, self.base_folder)\n- splits = pandas.read_csv(fn(\"list_eval_partition.txt\"), delim_whitespace=True, header=None, index_col=0)\n- identity = pandas.read_csv(fn(\"identity_CelebA.txt\"), delim_whitespace=True, header=None, index_col=0)\n- bbox = pandas.read_csv(fn(\"list_bbox_celeba.txt\"), delim_whitespace=True, header=1, index_col=0)\n- landmarks_align = pandas.read_csv(fn(\"list_landmarks_align_celeba.txt\"), delim_whitespace=True, header=1)\n- attr = pandas.read_csv(fn(\"list_attr_celeba.txt\"), delim_whitespace=True, header=1)\n-\n- mask = slice(None) if split_ is None else (splits[1] == split_)\n-\n- self.filename = splits[mask].index.values\n- self.identity = torch.as_tensor(identity[mask].values)\n- self.bbox = torch.as_tensor(bbox[mask].values)\n- self.landmarks_align = torch.as_tensor(landmarks_align[mask].values)\n- self.attr = torch.as_tensor(attr[mask].values)\n- self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}\n- self.attr_names = list(attr.columns)\n+ with open(fn(filename)) as csv_file:\n+ data = list(csv.reader(csv_file, delimiter=' ', skipinitialspace=True))\n+\n+ if header is not None:\n+ headers = data[header]\n+ data = data[header + 1:]\n+\n+ indices = [row[0] for row in data]\n+ data = [row[1:] for row in data]\n+ data_int = [list(map(int, i)) for i in data]\n+\n+ return CSV(headers, indices, torch.tensor(data_int))\n \n def _check_integrity(self) -> bool:\n for (_, md5, filename) in self.file_list:\n", "issue": "Avoid pandas dependency for CelebA dataset\nThe CelebA dataset has a runtime dependency on `pandas`. 
Since all we need is `pandas.read_csv`, it would be interesting to see if we can replace `pandas.read_csv` by just the builtin `csv` module. The mergability of the PR would depend on how ugly / complex the code becomes... :)\n\ncc @pmeier\n", "before_files": [{"content": "from functools import partial\nimport torch\nimport os\nimport PIL\nfrom typing import Any, Callable, List, Optional, Union, Tuple\nfrom .vision import VisionDataset\nfrom .utils import download_file_from_google_drive, check_integrity, verify_str_arg\n\n\nclass CelebA(VisionDataset):\n \"\"\"`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.\n\n Args:\n root (string): Root directory where images are downloaded to.\n split (string): One of {'train', 'valid', 'test', 'all'}.\n Accordingly dataset is selected.\n target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,\n or ``landmarks``. Can also be a list to output a tuple with all specified target types.\n The targets represent:\n\n - ``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes\n - ``identity`` (int): label for each person (data points with the same identity are the same person)\n - ``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height)\n - ``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,\n righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)\n\n Defaults to ``attr``. If empty, ``None`` will be returned as target.\n\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.ToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n\n base_folder = \"celeba\"\n # There currently does not appear to be a easy way to extract 7z in python (without introducing additional\n # dependencies). 
The \"in-the-wild\" (not aligned+cropped) images are only in 7z, so they are not available\n # right now.\n file_list = [\n # File ID MD5 Hash Filename\n (\"0B7EVK8r0v71pZjFTYXZWM3FlRnM\", \"00d2c5bc6d35e252742224ab0c1e8fcb\", \"img_align_celeba.zip\"),\n # (\"0B7EVK8r0v71pbWNEUjJKdDQ3dGc\",\"b6cd7e93bc7a96c2dc33f819aa3ac651\", \"img_align_celeba_png.7z\"),\n # (\"0B7EVK8r0v71peklHb0pGdDl6R28\", \"b6cd7e93bc7a96c2dc33f819aa3ac651\", \"img_celeba.7z\"),\n (\"0B7EVK8r0v71pblRyaVFSWGxPY0U\", \"75e246fa4810816ffd6ee81facbd244c\", \"list_attr_celeba.txt\"),\n (\"1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS\", \"32bd1bd63d3c78cd57e08160ec5ed1e2\", \"identity_CelebA.txt\"),\n (\"0B7EVK8r0v71pbThiMVRxWXZ4dU0\", \"00566efa6fedff7a56946cd1c10f1c16\", \"list_bbox_celeba.txt\"),\n (\"0B7EVK8r0v71pd0FJY3Blby1HUTQ\", \"cc24ecafdb5b50baae59b03474781f8c\", \"list_landmarks_align_celeba.txt\"),\n # (\"0B7EVK8r0v71pTzJIdlJWdHczRlU\", \"063ee6ddb681f96bc9ca28c6febb9d1a\", \"list_landmarks_celeba.txt\"),\n (\"0B7EVK8r0v71pY0NSMzRuSXJEVkk\", \"d32c9cbf5e040fd4025c592c306e6668\", \"list_eval_partition.txt\"),\n ]\n\n def __init__(\n self,\n root: str,\n split: str = \"train\",\n target_type: Union[List[str], str] = \"attr\",\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = False,\n ) -> None:\n import pandas\n super(CelebA, self).__init__(root, transform=transform,\n target_transform=target_transform)\n self.split = split\n if isinstance(target_type, list):\n self.target_type = target_type\n else:\n self.target_type = [target_type]\n\n if not self.target_type and self.target_transform is not None:\n raise RuntimeError('target_transform is specified but target_type is empty')\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted.' 
+\n ' You can use download=True to download it')\n\n split_map = {\n \"train\": 0,\n \"valid\": 1,\n \"test\": 2,\n \"all\": None,\n }\n split_ = split_map[verify_str_arg(split.lower(), \"split\",\n (\"train\", \"valid\", \"test\", \"all\"))]\n\n fn = partial(os.path.join, self.root, self.base_folder)\n splits = pandas.read_csv(fn(\"list_eval_partition.txt\"), delim_whitespace=True, header=None, index_col=0)\n identity = pandas.read_csv(fn(\"identity_CelebA.txt\"), delim_whitespace=True, header=None, index_col=0)\n bbox = pandas.read_csv(fn(\"list_bbox_celeba.txt\"), delim_whitespace=True, header=1, index_col=0)\n landmarks_align = pandas.read_csv(fn(\"list_landmarks_align_celeba.txt\"), delim_whitespace=True, header=1)\n attr = pandas.read_csv(fn(\"list_attr_celeba.txt\"), delim_whitespace=True, header=1)\n\n mask = slice(None) if split_ is None else (splits[1] == split_)\n\n self.filename = splits[mask].index.values\n self.identity = torch.as_tensor(identity[mask].values)\n self.bbox = torch.as_tensor(bbox[mask].values)\n self.landmarks_align = torch.as_tensor(landmarks_align[mask].values)\n self.attr = torch.as_tensor(attr[mask].values)\n self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}\n self.attr_names = list(attr.columns)\n\n def _check_integrity(self) -> bool:\n for (_, md5, filename) in self.file_list:\n fpath = os.path.join(self.root, self.base_folder, filename)\n _, ext = os.path.splitext(filename)\n # Allow original archive to be deleted (zip and 7z)\n # Only need the extracted images\n if ext not in [\".zip\", \".7z\"] and not check_integrity(fpath, md5):\n return False\n\n # Should check a hash of the images\n return os.path.isdir(os.path.join(self.root, self.base_folder, \"img_align_celeba\"))\n\n def download(self) -> None:\n import zipfile\n\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n\n for (file_id, md5, filename) in self.file_list:\n download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)\n\n with zipfile.ZipFile(os.path.join(self.root, self.base_folder, \"img_align_celeba.zip\"), \"r\") as f:\n f.extractall(os.path.join(self.root, self.base_folder))\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n X = PIL.Image.open(os.path.join(self.root, self.base_folder, \"img_align_celeba\", self.filename[index]))\n\n target: Any = []\n for t in self.target_type:\n if t == \"attr\":\n target.append(self.attr[index, :])\n elif t == \"identity\":\n target.append(self.identity[index, 0])\n elif t == \"bbox\":\n target.append(self.bbox[index, :])\n elif t == \"landmarks\":\n target.append(self.landmarks_align[index, :])\n else:\n # TODO: refactor with utils.verify_str_arg\n raise ValueError(\"Target type \\\"{}\\\" is not recognized.\".format(t))\n\n if self.transform is not None:\n X = self.transform(X)\n\n if target:\n target = tuple(target) if len(target) > 1 else target[0]\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n else:\n target = None\n\n return X, target\n\n def __len__(self) -> int:\n return len(self.attr)\n\n def extra_repr(self) -> str:\n lines = [\"Target type: {target_type}\", \"Split: {split}\"]\n return '\\n'.join(lines).format(**self.__dict__)\n", "path": "torchvision/datasets/celeba.py"}]} | 3,225 | 972 |
gh_patches_debug_6684 | rasdani/github-patches | git_diff | netbox-community__netbox-11404 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scheduling a job in the past raises an exception
### NetBox version
v3.4.2
### Python version
3.10
### Steps to Reproduce
1. Create a script
2. Schedule it in the past
### Expected Behavior
Form validation error message
### Observed Behavior
```
Traceback (most recent call last):
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/core/handlers/exception.py", line 55, in inner
response = get_response(request)
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/core/handlers/base.py", line 197, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/views/generic/base.py", line 103, in view
return self.dispatch(request, *args, **kwargs)
File "/home/main/devel/repos/netbox/netbox/utilities/views.py", line 53, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/views/generic/base.py", line 142, in dispatch
return handler(request, *args, **kwargs)
File "/home/main/devel/repos/netbox/netbox/extras/views.py", line 815, in post
elif form.is_valid():
File "/home/main/devel/repos/netbox/netbox/utilities/forms/forms.py", line 69, in is_valid
is_valid = super().is_valid()
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 205, in is_valid
return self.is_bound and not self.errors
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 200, in errors
self.full_clean()
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 437, in full_clean
self._clean_fields()
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 455, in _clean_fields
self.add_error(name, e)
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 392, in add_error
raise TypeError(
TypeError: The argument `field` must be `None` when the `error` argument contains errors for multiple fields.
```
</issue>
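The traceback bottoms out in Django's `Form.add_error()`: during `_clean_fields()`, an error raised by a field-specific clean method is re-attached to that one field, and Django rejects a dict-keyed `ValidationError` there because a dict means "errors for multiple fields". A minimal sketch of the distinction, assuming nothing beyond stock Django forms (the form below is hypothetical, not NetBox code):

```python
from django import forms
from django.utils import timezone

class EventForm(forms.Form):
    starts_at = forms.DateTimeField(required=False)

    def clean_starts_at(self):
        value = self.cleaned_data["starts_at"]
        if value and value < timezone.now():
            # A dict-keyed error here is treated as "errors for multiple fields",
            # so Django's add_error("starts_at", e) raises the TypeError above:
            # raise forms.ValidationError({"starts_at": "Scheduled time must be in the future."})
            #
            # A plain message (or moving the check into Form.clean()) works:
            raise forms.ValidationError("Scheduled time must be in the future.")
        return value
```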
<code>
[start of netbox/extras/forms/scripts.py]
1 from django import forms
2 from django.utils import timezone
3 from django.utils.translation import gettext as _
4
5 from utilities.forms import BootstrapMixin, DateTimePicker, SelectDurationWidget
6
7 __all__ = (
8 'ScriptForm',
9 )
10
11
12 class ScriptForm(BootstrapMixin, forms.Form):
13 _commit = forms.BooleanField(
14 required=False,
15 initial=True,
16 label=_("Commit changes"),
17 help_text=_("Commit changes to the database (uncheck for a dry-run)")
18 )
19 _schedule_at = forms.DateTimeField(
20 required=False,
21 widget=DateTimePicker(),
22 label=_("Schedule at"),
23 help_text=_("Schedule execution of script to a set time"),
24 )
25 _interval = forms.IntegerField(
26 required=False,
27 min_value=1,
28 label=_("Recurs every"),
29 widget=SelectDurationWidget(),
30 help_text=_("Interval at which this script is re-run (in minutes)")
31 )
32
33 def __init__(self, *args, **kwargs):
34 super().__init__(*args, **kwargs)
35
36 # Annotate the current system time for reference
37 now = timezone.now().strftime('%Y-%m-%d %H:%M:%S')
38 self.fields['_schedule_at'].help_text += f' (current time: <strong>{now}</strong>)'
39
40 # Move _commit and _schedule_at to the end of the form
41 schedule_at = self.fields.pop('_schedule_at')
42 interval = self.fields.pop('_interval')
43 commit = self.fields.pop('_commit')
44 self.fields['_schedule_at'] = schedule_at
45 self.fields['_interval'] = interval
46 self.fields['_commit'] = commit
47
48 def clean__schedule_at(self):
49 scheduled_time = self.cleaned_data['_schedule_at']
50 if scheduled_time and scheduled_time < timezone.now():
51 raise forms.ValidationError({
52 '_schedule_at': _('Scheduled time must be in the future.')
53 })
54
55 return scheduled_time
56
57 @property
58 def requires_input(self):
59 """
60 A boolean indicating whether the form requires user input (ignore the built-in fields).
61 """
62 return bool(len(self.fields) > 3)
63
[end of netbox/extras/forms/scripts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/extras/forms/scripts.py b/netbox/extras/forms/scripts.py
--- a/netbox/extras/forms/scripts.py
+++ b/netbox/extras/forms/scripts.py
@@ -48,9 +48,7 @@
def clean__schedule_at(self):
scheduled_time = self.cleaned_data['_schedule_at']
if scheduled_time and scheduled_time < timezone.now():
- raise forms.ValidationError({
- '_schedule_at': _('Scheduled time must be in the future.')
- })
+ raise forms.ValidationError(_('Scheduled time must be in the future.'))
return scheduled_time
| {"golden_diff": "diff --git a/netbox/extras/forms/scripts.py b/netbox/extras/forms/scripts.py\n--- a/netbox/extras/forms/scripts.py\n+++ b/netbox/extras/forms/scripts.py\n@@ -48,9 +48,7 @@\n def clean__schedule_at(self):\n scheduled_time = self.cleaned_data['_schedule_at']\n if scheduled_time and scheduled_time < timezone.now():\n- raise forms.ValidationError({\n- '_schedule_at': _('Scheduled time must be in the future.')\n- })\n+ raise forms.ValidationError(_('Scheduled time must be in the future.'))\n \n return scheduled_time\n", "issue": "Scheduling a job in the past raises an exception\n### NetBox version\n\nv3.4.2\n\n### Python version\n\n3.10\n\n### Steps to Reproduce\n\n1. Create a script\r\n2. Schedule it in the past\n\n### Expected Behavior\n\nForm validation error message\n\n### Observed Behavior\n\n```\r\nTraceback (most recent call last):\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/core/handlers/exception.py\", line 55, in inner\r\n response = get_response(request)\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/core/handlers/base.py\", line 197, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/views/generic/base.py\", line 103, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/home/main/devel/repos/netbox/netbox/utilities/views.py\", line 53, in dispatch\r\n return super().dispatch(request, *args, **kwargs)\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/views/generic/base.py\", line 142, in dispatch\r\n return handler(request, *args, **kwargs)\r\n File \"/home/main/devel/repos/netbox/netbox/extras/views.py\", line 815, in post\r\n elif form.is_valid():\r\n File \"/home/main/devel/repos/netbox/netbox/utilities/forms/forms.py\", line 69, in is_valid\r\n is_valid = super().is_valid()\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 205, in is_valid\r\n return self.is_bound and not self.errors\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 200, in errors\r\n self.full_clean()\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 437, in full_clean\r\n self._clean_fields()\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 455, in _clean_fields\r\n self.add_error(name, e)\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 392, in add_error\r\n raise TypeError(\r\nTypeError: The argument `field` must be `None` when the `error` argument contains errors for multiple fields.\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\n\nfrom utilities.forms import BootstrapMixin, DateTimePicker, SelectDurationWidget\n\n__all__ = (\n 'ScriptForm',\n)\n\n\nclass ScriptForm(BootstrapMixin, forms.Form):\n _commit = forms.BooleanField(\n required=False,\n initial=True,\n label=_(\"Commit changes\"),\n help_text=_(\"Commit changes to the database (uncheck for a dry-run)\")\n )\n _schedule_at = forms.DateTimeField(\n required=False,\n widget=DateTimePicker(),\n label=_(\"Schedule at\"),\n help_text=_(\"Schedule execution of script to a set time\"),\n )\n _interval = 
forms.IntegerField(\n required=False,\n min_value=1,\n label=_(\"Recurs every\"),\n widget=SelectDurationWidget(),\n help_text=_(\"Interval at which this script is re-run (in minutes)\")\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Annotate the current system time for reference\n now = timezone.now().strftime('%Y-%m-%d %H:%M:%S')\n self.fields['_schedule_at'].help_text += f' (current time: <strong>{now}</strong>)'\n\n # Move _commit and _schedule_at to the end of the form\n schedule_at = self.fields.pop('_schedule_at')\n interval = self.fields.pop('_interval')\n commit = self.fields.pop('_commit')\n self.fields['_schedule_at'] = schedule_at\n self.fields['_interval'] = interval\n self.fields['_commit'] = commit\n\n def clean__schedule_at(self):\n scheduled_time = self.cleaned_data['_schedule_at']\n if scheduled_time and scheduled_time < timezone.now():\n raise forms.ValidationError({\n '_schedule_at': _('Scheduled time must be in the future.')\n })\n\n return scheduled_time\n\n @property\n def requires_input(self):\n \"\"\"\n A boolean indicating whether the form requires user input (ignore the built-in fields).\n \"\"\"\n return bool(len(self.fields) > 3)\n", "path": "netbox/extras/forms/scripts.py"}]} | 1,726 | 127 |
gh_patches_debug_42729 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1227 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PORT] Replace UseState() with UseBotState()
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3862
Fixes #3859
and use untyped params so that order and type are not fixed.
Tweak RegisterClassMiddleware so you can provide the key for the turn state.
# Changed projects
* Microsoft.Bot.Builder.Dialogs.Adaptive.Testing
* Microsoft.Bot.Builder
* Microsoft.Bot.Builder.AI.QnA.Tests
* Microsoft.Bot.Builder.Dialogs.Adaptive.Templates.Tests
* Microsoft.Bot.Builder.Dialogs.Adaptive.Tests
* Microsoft.Bot.Builder.Dialogs.Declarative.Tests
* Microsoft.Bot.Builder.Dialogs.Tests
* Microsoft.Bot.Builder.TestBot.Json
*
</issue>
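The reworked registration this port asks for shows up in the diff further below as `AdapterExtensions.use_bot_state()` plus an optional `key` argument on `RegisterClassMiddleware`. A sketch of how the new call is meant to be used, assuming the in-memory storage and Bot Framework adapter classes that ship with botbuilder-core (the empty credentials are placeholders):

```python
from botbuilder.core import (
    BotFrameworkAdapter,
    BotFrameworkAdapterSettings,
    ConversationState,
    MemoryStorage,
    UserState,
)
from botbuilder.core.adapter_extensions import AdapterExtensions

adapter = BotFrameworkAdapter(BotFrameworkAdapterSettings("", ""))

storage = MemoryStorage()
user_state = UserState(storage)
conversation_state = ConversationState(storage)

# Deprecated: fixed argument order and exactly two state objects.
# AdapterExtensions.use_state(adapter, user_state, conversation_state)

# New: any number of BotState objects, in any order; each is registered in
# turn_state under its fully qualified class name and auto-saved each turn.
AdapterExtensions.use_bot_state(adapter, user_state, conversation_state)
```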
<code>
[start of libraries/botbuilder-core/botbuilder/core/register_class_middleware.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 from typing import Callable, Awaitable
4
5 from botbuilder.core import Middleware, TurnContext
6
7
8 class RegisterClassMiddleware(Middleware):
9 """
10 Middleware for adding an object to or registering a service with the current turn context.
11 """
12
13 def __init__(self, service):
14 self.service = service
15
16 async def on_turn(
17 self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
18 ):
19 # C# has TurnStateCollection with has overrides for adding items
20 # to TurnState. Python does not. In C#'s case, there is an 'Add'
21 # to handle adding object, and that uses the fully qualified class name.
22 context.turn_state[self.fullname(self.service)] = self.service
23 await logic()
24
25 @staticmethod
26 def fullname(obj):
27 module = obj.__class__.__module__
28 if module is None or module == str.__class__.__module__:
29 return obj.__class__.__name__ # Avoid reporting __builtin__
30 return module + "." + obj.__class__.__name__
31
[end of libraries/botbuilder-core/botbuilder/core/register_class_middleware.py]
[start of libraries/botbuilder-core/botbuilder/core/adapter_extensions.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 from botbuilder.core import (
4 BotAdapter,
5 Storage,
6 RegisterClassMiddleware,
7 UserState,
8 ConversationState,
9 AutoSaveStateMiddleware,
10 )
11
12
13 class AdapterExtensions:
14 @staticmethod
15 def use_storage(adapter: BotAdapter, storage: Storage) -> BotAdapter:
16 """
17 Registers a storage layer with the adapter. The storage object will be available via the turn context's
18 `turn_state` property.
19
20 :param adapter: The BotAdapter on which to register the storage object.
21 :param storage: The Storage object to register.
22 :return: The BotAdapter
23 """
24 return adapter.use(RegisterClassMiddleware(storage))
25
26 @staticmethod
27 def use_state(
28 adapter: BotAdapter,
29 user_state: UserState,
30 conversation_state: ConversationState,
31 auto: bool = True,
32 ) -> BotAdapter:
33 """
34 Registers user and conversation state objects with the adapter. These objects will be available via
35 the turn context's `turn_state` property.
36
37 :param adapter: The BotAdapter on which to register the state objects.
38 :param user_state: The UserState object to register.
39 :param conversation_state: The ConversationState object to register.
40 :param auto: True to automatically persist state each turn.
41 :return: The BotAdapter
42 """
43 if not adapter:
44 raise TypeError("BotAdapter is required")
45
46 if not user_state:
47 raise TypeError("UserState is required")
48
49 if not conversation_state:
50 raise TypeError("ConversationState is required")
51
52 adapter.use(RegisterClassMiddleware(user_state))
53 adapter.use(RegisterClassMiddleware(conversation_state))
54
55 if auto:
56 adapter.use(AutoSaveStateMiddleware([user_state, conversation_state]))
57
58 return adapter
59
[end of libraries/botbuilder-core/botbuilder/core/adapter_extensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py b/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py
--- a/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py
+++ b/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py
@@ -1,7 +1,10 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
+from warnings import warn
+
from botbuilder.core import (
BotAdapter,
+ BotState,
Storage,
RegisterClassMiddleware,
UserState,
@@ -23,6 +26,39 @@
"""
return adapter.use(RegisterClassMiddleware(storage))
+ @staticmethod
+ def use_bot_state(
+ bot_adapter: BotAdapter, *bot_states: BotState, auto: bool = True
+ ) -> BotAdapter:
+ """
+ Registers bot state object into the TurnContext. The botstate will be available via the turn context.
+
+ :param bot_adapter: The BotAdapter on which to register the state objects.
+ :param bot_states: One or more BotState objects to register.
+ :return: The updated adapter.
+ """
+ if not bot_states:
+ raise TypeError("At least one BotAdapter is required")
+
+ for bot_state in bot_states:
+ bot_adapter.use(
+ RegisterClassMiddleware(
+ bot_state, AdapterExtensions.fullname(bot_state)
+ )
+ )
+
+ if auto:
+ bot_adapter.use(AutoSaveStateMiddleware(bot_states))
+
+ return bot_adapter
+
+ @staticmethod
+ def fullname(obj):
+ module = obj.__class__.__module__
+ if module is None or module == str.__class__.__module__:
+ return obj.__class__.__name__ # Avoid reporting __builtin__
+ return module + "." + obj.__class__.__name__
+
@staticmethod
def use_state(
adapter: BotAdapter,
@@ -31,7 +67,7 @@
auto: bool = True,
) -> BotAdapter:
"""
- Registers user and conversation state objects with the adapter. These objects will be available via
+ [DEPRECATED] Registers user and conversation state objects with the adapter. These objects will be available via
the turn context's `turn_state` property.
:param adapter: The BotAdapter on which to register the state objects.
@@ -40,6 +76,11 @@
:param auto: True to automatically persist state each turn.
:return: The BotAdapter
"""
+ warn(
+ "This method is deprecated in 4.9. You should use the method .use_bot_state() instead.",
+ DeprecationWarning,
+ )
+
if not adapter:
raise TypeError("BotAdapter is required")
diff --git a/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py b/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py
@@ -10,8 +10,9 @@
Middleware for adding an object to or registering a service with the current turn context.
"""
- def __init__(self, service):
+ def __init__(self, service, key: str = None):
self.service = service
+ self._key = key
async def on_turn(
self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
@@ -19,7 +20,8 @@
# C# has TurnStateCollection with has overrides for adding items
# to TurnState. Python does not. In C#'s case, there is an 'Add'
# to handle adding object, and that uses the fully qualified class name.
- context.turn_state[self.fullname(self.service)] = self.service
+ key = self._key or self.fullname(self.service)
+ context.turn_state[key] = self.service
await logic()
@staticmethod
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py b/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py\n--- a/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py\n+++ b/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py\n@@ -1,7 +1,10 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n+from warnings import warn\n+\n from botbuilder.core import (\n BotAdapter,\n+ BotState,\n Storage,\n RegisterClassMiddleware,\n UserState,\n@@ -23,6 +26,39 @@\n \"\"\"\n return adapter.use(RegisterClassMiddleware(storage))\n \n+ @staticmethod\n+ def use_bot_state(\n+ bot_adapter: BotAdapter, *bot_states: BotState, auto: bool = True\n+ ) -> BotAdapter:\n+ \"\"\"\n+ Registers bot state object into the TurnContext. The botstate will be available via the turn context.\n+\n+ :param bot_adapter: The BotAdapter on which to register the state objects.\n+ :param bot_states: One or more BotState objects to register.\n+ :return: The updated adapter.\n+ \"\"\"\n+ if not bot_states:\n+ raise TypeError(\"At least one BotAdapter is required\")\n+\n+ for bot_state in bot_states:\n+ bot_adapter.use(\n+ RegisterClassMiddleware(\n+ bot_state, AdapterExtensions.fullname(bot_state)\n+ )\n+ )\n+\n+ if auto:\n+ bot_adapter.use(AutoSaveStateMiddleware(bot_states))\n+\n+ return bot_adapter\n+\n+ @staticmethod\n+ def fullname(obj):\n+ module = obj.__class__.__module__\n+ if module is None or module == str.__class__.__module__:\n+ return obj.__class__.__name__ # Avoid reporting __builtin__\n+ return module + \".\" + obj.__class__.__name__\n+\n @staticmethod\n def use_state(\n adapter: BotAdapter,\n@@ -31,7 +67,7 @@\n auto: bool = True,\n ) -> BotAdapter:\n \"\"\"\n- Registers user and conversation state objects with the adapter. These objects will be available via\n+ [DEPRECATED] Registers user and conversation state objects with the adapter. These objects will be available via\n the turn context's `turn_state` property.\n \n :param adapter: The BotAdapter on which to register the state objects.\n@@ -40,6 +76,11 @@\n :param auto: True to automatically persist state each turn.\n :return: The BotAdapter\n \"\"\"\n+ warn(\n+ \"This method is deprecated in 4.9. You should use the method .use_bot_state() instead.\",\n+ DeprecationWarning,\n+ )\n+\n if not adapter:\n raise TypeError(\"BotAdapter is required\")\n \ndiff --git a/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py b/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py\n@@ -10,8 +10,9 @@\n Middleware for adding an object to or registering a service with the current turn context.\n \"\"\"\n \n- def __init__(self, service):\n+ def __init__(self, service, key: str = None):\n self.service = service\n+ self._key = key\n \n async def on_turn(\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\n@@ -19,7 +20,8 @@\n # C# has TurnStateCollection with has overrides for adding items\n # to TurnState. Python does not. 
In C#'s case, there is an 'Add'\n # to handle adding object, and that uses the fully qualified class name.\n- context.turn_state[self.fullname(self.service)] = self.service\n+ key = self._key or self.fullname(self.service)\n+ context.turn_state[key] = self.service\n await logic()\n \n @staticmethod\n", "issue": "[PORT] Replace UseState() with UseBotState() \n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3862\n\nFixes #3859 \r\nand use untyped params so that order and type are not fixed.\r\nTweak RegisterMiddlewareClass so you can provide the key for the turnstate.\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder.Dialogs.Adaptive.Testing\r\n* Microsoft.Bot.Builder\r\n* Microsoft.Bot.Builder.AI.QnA.Tests\r\n* Microsoft.Bot.Builder.Dialogs.Adaptive.Templates.Tests\r\n* Microsoft.Bot.Builder.Dialogs.Adaptive.Tests\r\n* Microsoft.Bot.Builder.Dialogs.Declarative.Tests\r\n* Microsoft.Bot.Builder.Dialogs.Tests\r\n* Microsoft.Bot.Builder.TestBot.Json\r\n* \r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nfrom typing import Callable, Awaitable\n\nfrom botbuilder.core import Middleware, TurnContext\n\n\nclass RegisterClassMiddleware(Middleware):\n \"\"\"\n Middleware for adding an object to or registering a service with the current turn context.\n \"\"\"\n\n def __init__(self, service):\n self.service = service\n\n async def on_turn(\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\n ):\n # C# has TurnStateCollection with has overrides for adding items\n # to TurnState. Python does not. In C#'s case, there is an 'Add'\n # to handle adding object, and that uses the fully qualified class name.\n context.turn_state[self.fullname(self.service)] = self.service\n await logic()\n\n @staticmethod\n def fullname(obj):\n module = obj.__class__.__module__\n if module is None or module == str.__class__.__module__:\n return obj.__class__.__name__ # Avoid reporting __builtin__\n return module + \".\" + obj.__class__.__name__\n", "path": "libraries/botbuilder-core/botbuilder/core/register_class_middleware.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nfrom botbuilder.core import (\n BotAdapter,\n Storage,\n RegisterClassMiddleware,\n UserState,\n ConversationState,\n AutoSaveStateMiddleware,\n)\n\n\nclass AdapterExtensions:\n @staticmethod\n def use_storage(adapter: BotAdapter, storage: Storage) -> BotAdapter:\n \"\"\"\n Registers a storage layer with the adapter. The storage object will be available via the turn context's\n `turn_state` property.\n\n :param adapter: The BotAdapter on which to register the storage object.\n :param storage: The Storage object to register.\n :return: The BotAdapter\n \"\"\"\n return adapter.use(RegisterClassMiddleware(storage))\n\n @staticmethod\n def use_state(\n adapter: BotAdapter,\n user_state: UserState,\n conversation_state: ConversationState,\n auto: bool = True,\n ) -> BotAdapter:\n \"\"\"\n Registers user and conversation state objects with the adapter. 
These objects will be available via\n the turn context's `turn_state` property.\n\n :param adapter: The BotAdapter on which to register the state objects.\n :param user_state: The UserState object to register.\n :param conversation_state: The ConversationState object to register.\n :param auto: True to automatically persist state each turn.\n :return: The BotAdapter\n \"\"\"\n if not adapter:\n raise TypeError(\"BotAdapter is required\")\n\n if not user_state:\n raise TypeError(\"UserState is required\")\n\n if not conversation_state:\n raise TypeError(\"ConversationState is required\")\n\n adapter.use(RegisterClassMiddleware(user_state))\n adapter.use(RegisterClassMiddleware(conversation_state))\n\n if auto:\n adapter.use(AutoSaveStateMiddleware([user_state, conversation_state]))\n\n return adapter\n", "path": "libraries/botbuilder-core/botbuilder/core/adapter_extensions.py"}]} | 1,546 | 910 |
gh_patches_debug_2897 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-8922 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CT-3210] [Bug] Error using `dbt list --select` when there is a cross-project model that is `version=0` in the parent project
### Is this a new bug in dbt-core?
- [X] I believe this is a new bug in dbt-core
- [X] I have searched the existing issues, and I could not find an existing issue for this bug
### Current Behavior
When you attempt to reference version 0 of a model, you get an unhandled exception with a stack trace.
### Expected Behavior
We should allow you to set model version to be 0.
### Steps To Reproduce
1. On parent/hub project, add a versioned model with `v: 0`
2. On the child/spoke project, attempt to reference that versioned model in a model:
`select * from {{ ref('example_hub', 'my_second_dbt_model', v=0) }}`
3. run `dbt list --select anything`
Outstanding question - is this only affecting cross-project refs? Or all refs to a model with `v: 0`?
### Relevant log output
_No response_
### Environment
```markdown
- OS:
- Python:
- dbt:
```
### Which database adapter are you using with dbt?
_No response_
### Additional Context
_No response_
</issue>
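The symptom lines up with a falsy-zero check: `0` is a legitimate `NodeVersion`, but `if self.version:` treats it the same as "no version", so the `v0` suffix never reaches the fully qualified name. A standalone illustration of the difference (plain Python, not dbt code; the model names come from the steps above):

```python
version = 0  # a valid model version that happens to be falsy

fqn = ["example_hub", "my_second_dbt_model"]
if version:                # 0 is falsy, so the suffix is silently skipped
    fqn.append(f"v{version}")

fqn_fixed = ["example_hub", "my_second_dbt_model"]
if version is not None:    # explicit None test keeps version 0
    fqn_fixed.append(f"v{version}")

print(fqn)        # ['example_hub', 'my_second_dbt_model']
print(fqn_fixed)  # ['example_hub', 'my_second_dbt_model', 'v0']
```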
<code>
[start of core/dbt/contracts/graph/node_args.py]
1 from dataclasses import dataclass, field
2 from datetime import datetime
3 from typing import Optional, List
4
5 from dbt.contracts.graph.unparsed import NodeVersion
6 from dbt.node_types import NodeType, AccessType
7
8
9 @dataclass
10 class ModelNodeArgs:
11 name: str
12 package_name: str
13 identifier: str
14 schema: str
15 database: Optional[str] = None
16 relation_name: Optional[str] = None
17 version: Optional[NodeVersion] = None
18 latest_version: Optional[NodeVersion] = None
19 deprecation_date: Optional[datetime] = None
20 access: Optional[str] = AccessType.Protected.value
21 generated_at: datetime = field(default_factory=datetime.utcnow)
22 depends_on_nodes: List[str] = field(default_factory=list)
23 enabled: bool = True
24
25 @property
26 def unique_id(self) -> str:
27 unique_id = f"{NodeType.Model}.{self.package_name}.{self.name}"
28 if self.version:
29 unique_id = f"{unique_id}.v{self.version}"
30
31 return unique_id
32
33 @property
34 def fqn(self) -> List[str]:
35 fqn = [self.package_name, self.name]
36 if self.version:
37 fqn.append(f"v{self.version}")
38
39 return fqn
40
[end of core/dbt/contracts/graph/node_args.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/dbt/contracts/graph/node_args.py b/core/dbt/contracts/graph/node_args.py
--- a/core/dbt/contracts/graph/node_args.py
+++ b/core/dbt/contracts/graph/node_args.py
@@ -33,7 +33,8 @@
@property
def fqn(self) -> List[str]:
fqn = [self.package_name, self.name]
- if self.version:
+ # Test for None explicitly because version can be 0
+ if self.version is not None:
fqn.append(f"v{self.version}")
return fqn
| {"golden_diff": "diff --git a/core/dbt/contracts/graph/node_args.py b/core/dbt/contracts/graph/node_args.py\n--- a/core/dbt/contracts/graph/node_args.py\n+++ b/core/dbt/contracts/graph/node_args.py\n@@ -33,7 +33,8 @@\n @property\n def fqn(self) -> List[str]:\n fqn = [self.package_name, self.name]\n- if self.version:\n+ # Test for None explicitly because version can be 0\n+ if self.version is not None:\n fqn.append(f\"v{self.version}\")\n \n return fqn\n", "issue": "[CT-3210] [Bug] Error using `dbt list --select` when there is a cross-project model that is `version=0` in the parent project\n### Is this a new bug in dbt-core?\r\n\r\n- [X] I believe this is a new bug in dbt-core\r\n- [X] I have searched the existing issues, and I could not find an existing issue for this bug\r\n\r\n### Current Behavior\r\n\r\nWhen you attempt to reference a model version 0, you get a stack trace error.\r\n\r\n\r\n\r\n### Expected Behavior\r\n\r\nWe should allow you to set model version to be 0.\r\n\r\n### Steps To Reproduce\r\n\r\n1. On parent/hub project, add a versioned model with `v: 0`\r\n2. On the child/spoke project, attempt to reference that versioned model in a model:\r\n `select * from {{ ref('example_hub', 'my_second_dbt_model', v=0) }}`\r\n3. run `dbt list --select anything`\r\n\r\nOutstanding question - is this only affecting cross-project refs? Or all refs to a model with `v: 0`?\r\n\r\n### Relevant log output\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS:\r\n- Python:\r\n- dbt:\r\n```\r\n\r\n\r\n### Which database adapter are you using with dbt?\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "from dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import Optional, List\n\nfrom dbt.contracts.graph.unparsed import NodeVersion\nfrom dbt.node_types import NodeType, AccessType\n\n\n@dataclass\nclass ModelNodeArgs:\n name: str\n package_name: str\n identifier: str\n schema: str\n database: Optional[str] = None\n relation_name: Optional[str] = None\n version: Optional[NodeVersion] = None\n latest_version: Optional[NodeVersion] = None\n deprecation_date: Optional[datetime] = None\n access: Optional[str] = AccessType.Protected.value\n generated_at: datetime = field(default_factory=datetime.utcnow)\n depends_on_nodes: List[str] = field(default_factory=list)\n enabled: bool = True\n\n @property\n def unique_id(self) -> str:\n unique_id = f\"{NodeType.Model}.{self.package_name}.{self.name}\"\n if self.version:\n unique_id = f\"{unique_id}.v{self.version}\"\n\n return unique_id\n\n @property\n def fqn(self) -> List[str]:\n fqn = [self.package_name, self.name]\n if self.version:\n fqn.append(f\"v{self.version}\")\n\n return fqn\n", "path": "core/dbt/contracts/graph/node_args.py"}]} | 1,182 | 132 |
gh_patches_debug_2955 | rasdani/github-patches | git_diff | facebookresearch__hydra-2729 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CI failing: `./tools/configen/configen/utils.py:4:1: F401 'typing.Tuple' imported but unused`
```
./tools/configen/configen/utils.py:4:1: F401 'typing.Tuple' imported but unused
nox > [2023-07-24 22:16:52,631] Command flake8 --config .flake8 failed with exit code 1
nox > [2023-07-24 22:16:52,632] Session lint-3.10 failed.
```
</issue>
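The F401 itself is mechanical: `Tuple` is imported in `tools/configen/configen/utils.py` but never referenced (tuple annotations are detected via `__origin__ is tuple` instead), so the lint-clean version of that import simply drops the name, as the diff below does:

```python
# Before (flake8 reports F401: 'typing.Tuple' imported but unused):
# from typing import Any, Dict, Iterable, List, Optional, Set, Tuple

# After (only the names the module actually uses):
from typing import Any, Dict, Iterable, List, Optional, Set
```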
<code>
[start of tools/configen/configen/utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import sys
3 from enum import Enum
4 from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
5
6 from omegaconf._utils import (
7 _resolve_optional,
8 get_dict_key_value_types,
9 get_list_element_type,
10 is_dict_annotation,
11 is_list_annotation,
12 is_primitive_type_annotation,
13 )
14
15
16 # borrowed from OmegaConf
17 def type_str(t: Any) -> str:
18 is_optional, t = _resolve_optional(t)
19 if t is None:
20 return type(t).__name__
21 if t is Any:
22 return "Any"
23 if t is ...:
24 return "..."
25
26 if sys.version_info < (3, 7, 0): # pragma: no cover
27 # Python 3.6
28 if hasattr(t, "__name__"):
29 name = str(t.__name__)
30 else:
31 if t.__origin__ is not None:
32 name = type_str(t.__origin__)
33 else:
34 name = str(t)
35 if name.startswith("typing."):
36 name = name[len("typing.") :]
37 else: # pragma: no cover
38 # Python >= 3.7
39 if hasattr(t, "__name__"):
40 name = str(t.__name__)
41 else:
42 if t._name is None:
43 if t.__origin__ is not None:
44 name = type_str(t.__origin__)
45 else:
46 name = str(t._name)
47
48 args = getattr(t, "__args__", None)
49 if args is not None:
50 args = ", ".join(type_str(t) for t in t.__args__)
51 ret = f"{name}[{args}]"
52 else:
53 ret = name
54 if is_optional:
55 return f"Optional[{ret}]"
56 else:
57 return ret
58
59
60 def is_tuple_annotation(type_: Any) -> bool:
61 origin = getattr(type_, "__origin__", None)
62 return origin is tuple
63
64
65 def convert_imports(imports: Set[Any], string_imports: Iterable[str]) -> List[str]:
66 tmp = set()
67 for imp in string_imports:
68 tmp.add(imp)
69 for t in imports:
70 s = None
71 origin = getattr(t, "__origin__", None)
72 if t is Any:
73 classname = "Any"
74 elif t is Optional:
75 classname = "Optional"
76 else:
77 if origin is list:
78 classname = "List"
79 elif origin is tuple:
80 classname = "Tuple"
81 elif origin is dict:
82 classname = "Dict"
83 else:
84 classname = t.__name__
85
86 if not is_primitive_type_annotation(t) or issubclass(t, Enum):
87 s = f"from {t.__module__} import {classname}"
88
89 if s is not None:
90 tmp.add(s)
91 return sorted(list(tmp))
92
93
94 def collect_imports(imports: Set[Any], type_: Any) -> None:
95 if is_list_annotation(type_):
96 collect_imports(imports, get_list_element_type(type_))
97 type_ = List
98 elif is_dict_annotation(type_):
99 kvt = get_dict_key_value_types(type_)
100 collect_imports(imports, kvt[0])
101 collect_imports(imports, kvt[1])
102 type_ = Dict
103 else:
104 is_optional = _resolve_optional(type_)[0]
105 if is_optional and type_ is not Any:
106 type_ = Optional
107 imports.add(type_)
108
[end of tools/configen/configen/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/configen/configen/utils.py b/tools/configen/configen/utils.py
--- a/tools/configen/configen/utils.py
+++ b/tools/configen/configen/utils.py
@@ -1,7 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
from enum import Enum
-from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
+from typing import Any, Dict, Iterable, List, Optional, Set
from omegaconf._utils import (
_resolve_optional,
| {"golden_diff": "diff --git a/tools/configen/configen/utils.py b/tools/configen/configen/utils.py\n--- a/tools/configen/configen/utils.py\n+++ b/tools/configen/configen/utils.py\n@@ -1,7 +1,7 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import sys\n from enum import Enum\n-from typing import Any, Dict, Iterable, List, Optional, Set, Tuple\n+from typing import Any, Dict, Iterable, List, Optional, Set\n \n from omegaconf._utils import (\n _resolve_optional,\n", "issue": "CI failing: `./tools/configen/configen/utils.py:4:1: F401 'typing.Tuple' imported but unused`\n```\r\n./tools/configen/configen/utils.py:4:1: F401 'typing.Tuple' imported but unused\r\nnox > [2023-07-24 22:16:52,631] Command flake8 --config .flake8 failed with exit code 1\r\nnox > [2023-07-24 22:16:52,632] Session lint-3.10 failed.\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport sys\nfrom enum import Enum\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Tuple\n\nfrom omegaconf._utils import (\n _resolve_optional,\n get_dict_key_value_types,\n get_list_element_type,\n is_dict_annotation,\n is_list_annotation,\n is_primitive_type_annotation,\n)\n\n\n# borrowed from OmegaConf\ndef type_str(t: Any) -> str:\n is_optional, t = _resolve_optional(t)\n if t is None:\n return type(t).__name__\n if t is Any:\n return \"Any\"\n if t is ...:\n return \"...\"\n\n if sys.version_info < (3, 7, 0): # pragma: no cover\n # Python 3.6\n if hasattr(t, \"__name__\"):\n name = str(t.__name__)\n else:\n if t.__origin__ is not None:\n name = type_str(t.__origin__)\n else:\n name = str(t)\n if name.startswith(\"typing.\"):\n name = name[len(\"typing.\") :]\n else: # pragma: no cover\n # Python >= 3.7\n if hasattr(t, \"__name__\"):\n name = str(t.__name__)\n else:\n if t._name is None:\n if t.__origin__ is not None:\n name = type_str(t.__origin__)\n else:\n name = str(t._name)\n\n args = getattr(t, \"__args__\", None)\n if args is not None:\n args = \", \".join(type_str(t) for t in t.__args__)\n ret = f\"{name}[{args}]\"\n else:\n ret = name\n if is_optional:\n return f\"Optional[{ret}]\"\n else:\n return ret\n\n\ndef is_tuple_annotation(type_: Any) -> bool:\n origin = getattr(type_, \"__origin__\", None)\n return origin is tuple\n\n\ndef convert_imports(imports: Set[Any], string_imports: Iterable[str]) -> List[str]:\n tmp = set()\n for imp in string_imports:\n tmp.add(imp)\n for t in imports:\n s = None\n origin = getattr(t, \"__origin__\", None)\n if t is Any:\n classname = \"Any\"\n elif t is Optional:\n classname = \"Optional\"\n else:\n if origin is list:\n classname = \"List\"\n elif origin is tuple:\n classname = \"Tuple\"\n elif origin is dict:\n classname = \"Dict\"\n else:\n classname = t.__name__\n\n if not is_primitive_type_annotation(t) or issubclass(t, Enum):\n s = f\"from {t.__module__} import {classname}\"\n\n if s is not None:\n tmp.add(s)\n return sorted(list(tmp))\n\n\ndef collect_imports(imports: Set[Any], type_: Any) -> None:\n if is_list_annotation(type_):\n collect_imports(imports, get_list_element_type(type_))\n type_ = List\n elif is_dict_annotation(type_):\n kvt = get_dict_key_value_types(type_)\n collect_imports(imports, kvt[0])\n collect_imports(imports, kvt[1])\n type_ = Dict\n else:\n is_optional = _resolve_optional(type_)[0]\n if is_optional and type_ is not Any:\n type_ = Optional\n imports.add(type_)\n", "path": "tools/configen/configen/utils.py"}]} | 1,646 | 120 |
gh_patches_debug_37823 | rasdani/github-patches | git_diff | nipy__nipype-3194 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NiftyReg interface raises ValueError: invalid version number
### Summary
Version number is "invalid" when I try to create a `RegAladin` node.
### Actual behavior
```python-traceback
C:\Users\fernando\tmp
(vesseg) λ python r.py
180612-15:47:29,796 interface WARNING:
version_from_command member of CommandLine was Deprecated in nipype-1.0.0 and deleted in 1.1.0
Traceback (most recent call last):
File "r.py", line 7, in <module>
aladin = niftyreg.RegAladin()
File "C:\Users\fernando\Miniconda3\envs\vesseg\lib\site-packages\nipype\interfaces\niftyreg\base.py", line 66, in __init__
StrictVersion(_version) < StrictVersion(self._min_version):
File "C:\Users\fernando\Miniconda3\envs\vesseg\lib\distutils\version.py", line 40, in __init__
self.parse(vstring)
File "C:\Users\fernando\Miniconda3\envs\vesseg\lib\distutils\version.py", line 137, in parse
raise ValueError("invalid version number '%s'" % vstring)
ValueError: invalid version number '1.5.58
'
```
I have temporarily hacked this by replacing [this line](https://github.com/nipy/nipype/blob/master/nipype/interfaces/niftyreg/base.py#L59):
```python
_version = self.version_from_command()
```
by:
```python
_version = bytes('1.5.58', "utf-8")
```
### How to replicate the behavior
Run
```python
from nipype.interfaces import niftyreg
aladin = niftyreg.RegAladin()
```
### Platform details:
```python
In [1]: import nipype
In [2]: nipype.get_info()
Out[2]:
{'pkg_path': 'C:\\Users\\fernando\\Miniconda3\\envs\\vesseg\\lib\\site-packages\\nipype',
'commit_source': 'archive substitution',
'commit_hash': '%h',
'nipype_version': '1.0.4',
'sys_version': '3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)]',
'sys_executable': 'C:\\Users\\fernando\\Miniconda3\\envs\\vesseg\\python.exe',
'sys_platform': 'win32',
'numpy_version': '1.14.3',
'scipy_version': '1.1.0',
'networkx_version': '2.1',
'nibabel_version': '2.2.1',
'traits_version': '4.6.0'}
In [3]: nipype.__version__
Out[3]: '1.0.4'
```
### Execution environment
I'm not sure what this means. I'm running Python from a `conda` environment.
</issue>
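The quoted error comes from `distutils.version.StrictVersion`, which rejects the trailing end-of-line characters left on the captured `reg_aladin --version` output (on the reporter's Windows setup that is most likely a `\r\n`, which is why the quoted version string spans two lines in the traceback). A standalone illustration, no NiftyReg install required:

```python
from distutils.version import StrictVersion

raw = b"1.5.58\r\n"   # what capturing the command output returns on Windows
# StrictVersion(raw.decode("utf-8"))   # ValueError: invalid version number '1.5.58\r\n'
version = raw.decode("utf-8").strip()  # strip the line ending before parsing
print(StrictVersion(version))                              # 1.5.58
print(StrictVersion(version) >= StrictVersion("1.5.30"))   # True (the interface minimum)
```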
<code>
[start of nipype/interfaces/niftyreg/base.py]
1 # -*- coding: utf-8 -*-
2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
3 # vi: set ft=python sts=4 ts=4 sw=4 et:
4 """
5 The niftyreg module provides classes for interfacing with `niftyreg
6 <http://sourceforge.net/projects/niftyreg/>`_ command line tools.
7
8 These are the base tools for working with niftyreg.
9
10 Registration tools are found in niftyreg/reg.py
11 Every other tool is found in niftyreg/regutils.py
12
13 Examples
14 --------
15 See the docstrings of the individual classes for examples.
16
17 """
18 from distutils.version import StrictVersion
19 import os
20
21 from ... import logging
22 from ..base import CommandLine, CommandLineInputSpec, traits, Undefined
23 from ...utils.filemanip import split_filename
24
25 iflogger = logging.getLogger("nipype.interface")
26
27
28 def get_custom_path(command, env_dir="NIFTYREGDIR"):
29 return os.path.join(os.getenv(env_dir, ""), command)
30
31
32 class NiftyRegCommandInputSpec(CommandLineInputSpec):
33 """Input Spec for niftyreg interfaces."""
34
35 # Set the number of omp thread to use
36 omp_core_val = traits.Int(
37 int(os.environ.get("OMP_NUM_THREADS", "1")),
38 desc="Number of openmp thread to use",
39 argstr="-omp %i",
40 usedefault=True,
41 )
42
43
44 class NiftyRegCommand(CommandLine):
45 """
46 Base support interface for NiftyReg commands.
47 """
48
49 _suffix = "_nr"
50 _min_version = "1.5.30"
51
52 input_spec = NiftyRegCommandInputSpec
53
54 def __init__(self, required_version=None, **inputs):
55 self.num_threads = 1
56 super(NiftyRegCommand, self).__init__(**inputs)
57 self.required_version = required_version
58 _version = self.version_from_command()
59 if _version:
60 _version = _version.decode("utf-8")
61 if self._min_version is not None and StrictVersion(
62 _version
63 ) < StrictVersion(self._min_version):
64 msg = "A later version of Niftyreg is required (%s < %s)"
65 iflogger.warning(msg, _version, self._min_version)
66 if required_version is not None:
67 if StrictVersion(_version) != StrictVersion(required_version):
68 msg = "The version of NiftyReg differs from the required"
69 msg += "(%s != %s)"
70 iflogger.warning(msg, _version, self.required_version)
71 self.inputs.on_trait_change(self._omp_update, "omp_core_val")
72 self.inputs.on_trait_change(self._environ_update, "environ")
73 self._omp_update()
74
75 def _omp_update(self):
76 if self.inputs.omp_core_val:
77 self.inputs.environ["OMP_NUM_THREADS"] = str(self.inputs.omp_core_val)
78 self.num_threads = self.inputs.omp_core_val
79 else:
80 if "OMP_NUM_THREADS" in self.inputs.environ:
81 del self.inputs.environ["OMP_NUM_THREADS"]
82 self.num_threads = 1
83
84 def _environ_update(self):
85 if self.inputs.environ:
86 if "OMP_NUM_THREADS" in self.inputs.environ:
87 self.inputs.omp_core_val = int(self.inputs.environ["OMP_NUM_THREADS"])
88 else:
89 self.inputs.omp_core_val = Undefined
90 else:
91 self.inputs.omp_core_val = Undefined
92
93 def check_version(self):
94 _version = self.version_from_command()
95 if not _version:
96 raise Exception("Niftyreg not found")
97 # Decoding to string:
98 _version = _version.decode("utf-8")
99 if StrictVersion(_version) < StrictVersion(self._min_version):
100 err = "A later version of Niftyreg is required (%s < %s)"
101 raise ValueError(err % (_version, self._min_version))
102 if self.required_version:
103 if StrictVersion(_version) != StrictVersion(self.required_version):
104 err = "The version of NiftyReg differs from the required"
105 err += "(%s != %s)"
106 raise ValueError(err % (_version, self.required_version))
107
108 @property
109 def version(self):
110 return self.version_from_command()
111
112 def exists(self):
113 return self.version_from_command() is not None
114
115 def _format_arg(self, name, spec, value):
116 if name == "omp_core_val":
117 self.numthreads = value
118 return super(NiftyRegCommand, self)._format_arg(name, spec, value)
119
120 def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None):
121 if basename == "":
122 msg = "Unable to generate filename for command %s. " % self.cmd
123 msg += "basename is not set!"
124 raise ValueError(msg)
125 _, final_bn, final_ext = split_filename(basename)
126 if out_dir is None:
127 out_dir = os.getcwd()
128 if ext is not None:
129 final_ext = ext
130 if suffix is not None:
131 final_bn = "".join((final_bn, suffix))
132 return os.path.abspath(os.path.join(out_dir, final_bn + final_ext))
133
[end of nipype/interfaces/niftyreg/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py
--- a/nipype/interfaces/niftyreg/base.py
+++ b/nipype/interfaces/niftyreg/base.py
@@ -19,7 +19,8 @@
import os
from ... import logging
-from ..base import CommandLine, CommandLineInputSpec, traits, Undefined
+from ..base import (CommandLine, CommandLineInputSpec, traits, Undefined,
+ PackageInfo)
from ...utils.filemanip import split_filename
iflogger = logging.getLogger("nipype.interface")
@@ -29,6 +30,14 @@
return os.path.join(os.getenv(env_dir, ""), command)
+class Info(PackageInfo):
+ version_cmd = get_custom_path('reg_aladin') + ' --version'
+
+ @staticmethod
+ def parse_version(raw_info):
+ return raw_info
+
+
class NiftyRegCommandInputSpec(CommandLineInputSpec):
"""Input Spec for niftyreg interfaces."""
@@ -55,9 +64,8 @@
self.num_threads = 1
super(NiftyRegCommand, self).__init__(**inputs)
self.required_version = required_version
- _version = self.version_from_command()
+ _version = self.version
if _version:
- _version = _version.decode("utf-8")
if self._min_version is not None and StrictVersion(
_version
) < StrictVersion(self._min_version):
@@ -91,11 +99,9 @@
self.inputs.omp_core_val = Undefined
def check_version(self):
- _version = self.version_from_command()
+ _version = self.version
if not _version:
raise Exception("Niftyreg not found")
- # Decoding to string:
- _version = _version.decode("utf-8")
if StrictVersion(_version) < StrictVersion(self._min_version):
err = "A later version of Niftyreg is required (%s < %s)"
raise ValueError(err % (_version, self._min_version))
@@ -107,10 +113,10 @@
@property
def version(self):
- return self.version_from_command()
+ return Info.version()
def exists(self):
- return self.version_from_command() is not None
+ return self.version is not None
def _format_arg(self, name, spec, value):
if name == "omp_core_val":
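For illustration, the new helper introduced by the diff above could be exercised like this. This is a sketch only: it assumes `reg_aladin` is discoverable via `PATH` or the `NIFTYREGDIR` environment variable, and that nipype's `PackageInfo.version()` returns `None` when the command cannot be run.

```python
# Sketch only: query the NiftyReg version through the Info class added above.
from nipype.interfaces.niftyreg.base import Info

raw = Info.version()  # raw output of `reg_aladin --version`, e.g. "1.5.58"
print(raw.strip() if raw else "NiftyReg not found")
```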
| {"golden_diff": "diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py\n--- a/nipype/interfaces/niftyreg/base.py\n+++ b/nipype/interfaces/niftyreg/base.py\n@@ -19,7 +19,8 @@\n import os\n \n from ... import logging\n-from ..base import CommandLine, CommandLineInputSpec, traits, Undefined\n+from ..base import (CommandLine, CommandLineInputSpec, traits, Undefined,\n+ PackageInfo)\n from ...utils.filemanip import split_filename\n \n iflogger = logging.getLogger(\"nipype.interface\")\n@@ -29,6 +30,14 @@\n return os.path.join(os.getenv(env_dir, \"\"), command)\n \n \n+class Info(PackageInfo):\n+ version_cmd = get_custom_path('reg_aladin') + ' --version'\n+\n+ @staticmethod\n+ def parse_version(raw_info):\n+ return raw_info\n+\n+\n class NiftyRegCommandInputSpec(CommandLineInputSpec):\n \"\"\"Input Spec for niftyreg interfaces.\"\"\"\n \n@@ -55,9 +64,8 @@\n self.num_threads = 1\n super(NiftyRegCommand, self).__init__(**inputs)\n self.required_version = required_version\n- _version = self.version_from_command()\n+ _version = self.version\n if _version:\n- _version = _version.decode(\"utf-8\")\n if self._min_version is not None and StrictVersion(\n _version\n ) < StrictVersion(self._min_version):\n@@ -91,11 +99,9 @@\n self.inputs.omp_core_val = Undefined\n \n def check_version(self):\n- _version = self.version_from_command()\n+ _version = self.version\n if not _version:\n raise Exception(\"Niftyreg not found\")\n- # Decoding to string:\n- _version = _version.decode(\"utf-8\")\n if StrictVersion(_version) < StrictVersion(self._min_version):\n err = \"A later version of Niftyreg is required (%s < %s)\"\n raise ValueError(err % (_version, self._min_version))\n@@ -107,10 +113,10 @@\n \n @property\n def version(self):\n- return self.version_from_command()\n+ return Info.version()\n \n def exists(self):\n- return self.version_from_command() is not None\n+ return self.version is not None\n \n def _format_arg(self, name, spec, value):\n if name == \"omp_core_val\":\n", "issue": "NiftyReg interface raises ValueError: invalid version number\n### Summary\r\nVersion number is \"invalid\" when I try to create a `RegAladin` node.\r\n\r\n### Actual behavior\r\n```python-traceback\r\nC:\\Users\\fernando\\tmp\r\n(vesseg) \u03bb python r.py\r\n180612-15:47:29,796 interface WARNING:\r\n version_from_command member of CommandLine was Deprecated in nipype-1.0.0 and deleted in 1.1.0\r\nTraceback (most recent call last):\r\n File \"r.py\", line 7, in <module>\r\n aladin = niftyreg.RegAladin()\r\n File \"C:\\Users\\fernando\\Miniconda3\\envs\\vesseg\\lib\\site-packages\\nipype\\interfaces\\niftyreg\\base.py\", line 66, in __init__\r\n StrictVersion(_version) < StrictVersion(self._min_version):\r\n File \"C:\\Users\\fernando\\Miniconda3\\envs\\vesseg\\lib\\distutils\\version.py\", line 40, in __init__\r\n self.parse(vstring)\r\n File \"C:\\Users\\fernando\\Miniconda3\\envs\\vesseg\\lib\\distutils\\version.py\", line 137, in parse\r\n raise ValueError(\"invalid version number '%s'\" % vstring)\r\nValueError: invalid version number '1.5.58\r\n'\r\n```\r\n\r\nI have temporarily hacked this by replacing [this line](https://github.com/nipy/nipype/blob/master/nipype/interfaces/niftyreg/base.py#L59):\r\n```python\r\n_version = self.version_from_command()\r\n```\r\nby:\r\n```python\r\n_version = bytes('1.5.58', \"utf-8\")\r\n```\r\n\r\n### How to replicate the behavior\r\nRun\r\n```python\r\nfrom nipype.interfaces import niftyreg\r\naladin = niftyreg.RegAladin()\r\n```\r\n\r\n### Platform 
details:\r\n```python\r\nIn [1]: import nipype\r\n\r\nIn [2]: nipype.get_info()\r\nOut[2]:\r\n{'pkg_path': 'C:\\\\Users\\\\fernando\\\\Miniconda3\\\\envs\\\\vesseg\\\\lib\\\\site-packages\\\\nipype',\r\n 'commit_source': 'archive substitution',\r\n 'commit_hash': '%h',\r\n 'nipype_version': '1.0.4',\r\n 'sys_version': '3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)]',\r\n 'sys_executable': 'C:\\\\Users\\\\fernando\\\\Miniconda3\\\\envs\\\\vesseg\\\\python.exe',\r\n 'sys_platform': 'win32',\r\n 'numpy_version': '1.14.3',\r\n 'scipy_version': '1.1.0',\r\n 'networkx_version': '2.1',\r\n 'nibabel_version': '2.2.1',\r\n 'traits_version': '4.6.0'}\r\n\r\nIn [3]: nipype.__version__\r\nOut[3]: '1.0.4'\r\n```\r\n\r\n### Execution environment\r\nI'm not sure what this means. I'm running Python from a `conda` environment.\r\n\r\n\nNiftyReg interface raises ValueError: invalid version number\n### Summary\r\nVersion number is \"invalid\" when I try to create a `RegAladin` node.\r\n\r\n### Actual behavior\r\n```python-traceback\r\nC:\\Users\\fernando\\tmp\r\n(vesseg) \u03bb python r.py\r\n180612-15:47:29,796 interface WARNING:\r\n version_from_command member of CommandLine was Deprecated in nipype-1.0.0 and deleted in 1.1.0\r\nTraceback (most recent call last):\r\n File \"r.py\", line 7, in <module>\r\n aladin = niftyreg.RegAladin()\r\n File \"C:\\Users\\fernando\\Miniconda3\\envs\\vesseg\\lib\\site-packages\\nipype\\interfaces\\niftyreg\\base.py\", line 66, in __init__\r\n StrictVersion(_version) < StrictVersion(self._min_version):\r\n File \"C:\\Users\\fernando\\Miniconda3\\envs\\vesseg\\lib\\distutils\\version.py\", line 40, in __init__\r\n self.parse(vstring)\r\n File \"C:\\Users\\fernando\\Miniconda3\\envs\\vesseg\\lib\\distutils\\version.py\", line 137, in parse\r\n raise ValueError(\"invalid version number '%s'\" % vstring)\r\nValueError: invalid version number '1.5.58\r\n'\r\n```\r\n\r\nI have temporarily hacked this by replacing [this line](https://github.com/nipy/nipype/blob/master/nipype/interfaces/niftyreg/base.py#L59):\r\n```python\r\n_version = self.version_from_command()\r\n```\r\nby:\r\n```python\r\n_version = bytes('1.5.58', \"utf-8\")\r\n```\r\n\r\n### How to replicate the behavior\r\nRun\r\n```python\r\nfrom nipype.interfaces import niftyreg\r\naladin = niftyreg.RegAladin()\r\n```\r\n\r\n### Platform details:\r\n```python\r\nIn [1]: import nipype\r\n\r\nIn [2]: nipype.get_info()\r\nOut[2]:\r\n{'pkg_path': 'C:\\\\Users\\\\fernando\\\\Miniconda3\\\\envs\\\\vesseg\\\\lib\\\\site-packages\\\\nipype',\r\n 'commit_source': 'archive substitution',\r\n 'commit_hash': '%h',\r\n 'nipype_version': '1.0.4',\r\n 'sys_version': '3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)]',\r\n 'sys_executable': 'C:\\\\Users\\\\fernando\\\\Miniconda3\\\\envs\\\\vesseg\\\\python.exe',\r\n 'sys_platform': 'win32',\r\n 'numpy_version': '1.14.3',\r\n 'scipy_version': '1.1.0',\r\n 'networkx_version': '2.1',\r\n 'nibabel_version': '2.2.1',\r\n 'traits_version': '4.6.0'}\r\n\r\nIn [3]: nipype.__version__\r\nOut[3]: '1.0.4'\r\n```\r\n\r\n### Execution environment\r\nI'm not sure what this means. 
I'm running Python from a `conda` environment.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nThe niftyreg module provides classes for interfacing with `niftyreg\n<http://sourceforge.net/projects/niftyreg/>`_ command line tools.\n\nThese are the base tools for working with niftyreg.\n\nRegistration tools are found in niftyreg/reg.py\nEvery other tool is found in niftyreg/regutils.py\n\nExamples\n--------\nSee the docstrings of the individual classes for examples.\n\n\"\"\"\nfrom distutils.version import StrictVersion\nimport os\n\nfrom ... import logging\nfrom ..base import CommandLine, CommandLineInputSpec, traits, Undefined\nfrom ...utils.filemanip import split_filename\n\niflogger = logging.getLogger(\"nipype.interface\")\n\n\ndef get_custom_path(command, env_dir=\"NIFTYREGDIR\"):\n return os.path.join(os.getenv(env_dir, \"\"), command)\n\n\nclass NiftyRegCommandInputSpec(CommandLineInputSpec):\n \"\"\"Input Spec for niftyreg interfaces.\"\"\"\n\n # Set the number of omp thread to use\n omp_core_val = traits.Int(\n int(os.environ.get(\"OMP_NUM_THREADS\", \"1\")),\n desc=\"Number of openmp thread to use\",\n argstr=\"-omp %i\",\n usedefault=True,\n )\n\n\nclass NiftyRegCommand(CommandLine):\n \"\"\"\n Base support interface for NiftyReg commands.\n \"\"\"\n\n _suffix = \"_nr\"\n _min_version = \"1.5.30\"\n\n input_spec = NiftyRegCommandInputSpec\n\n def __init__(self, required_version=None, **inputs):\n self.num_threads = 1\n super(NiftyRegCommand, self).__init__(**inputs)\n self.required_version = required_version\n _version = self.version_from_command()\n if _version:\n _version = _version.decode(\"utf-8\")\n if self._min_version is not None and StrictVersion(\n _version\n ) < StrictVersion(self._min_version):\n msg = \"A later version of Niftyreg is required (%s < %s)\"\n iflogger.warning(msg, _version, self._min_version)\n if required_version is not None:\n if StrictVersion(_version) != StrictVersion(required_version):\n msg = \"The version of NiftyReg differs from the required\"\n msg += \"(%s != %s)\"\n iflogger.warning(msg, _version, self.required_version)\n self.inputs.on_trait_change(self._omp_update, \"omp_core_val\")\n self.inputs.on_trait_change(self._environ_update, \"environ\")\n self._omp_update()\n\n def _omp_update(self):\n if self.inputs.omp_core_val:\n self.inputs.environ[\"OMP_NUM_THREADS\"] = str(self.inputs.omp_core_val)\n self.num_threads = self.inputs.omp_core_val\n else:\n if \"OMP_NUM_THREADS\" in self.inputs.environ:\n del self.inputs.environ[\"OMP_NUM_THREADS\"]\n self.num_threads = 1\n\n def _environ_update(self):\n if self.inputs.environ:\n if \"OMP_NUM_THREADS\" in self.inputs.environ:\n self.inputs.omp_core_val = int(self.inputs.environ[\"OMP_NUM_THREADS\"])\n else:\n self.inputs.omp_core_val = Undefined\n else:\n self.inputs.omp_core_val = Undefined\n\n def check_version(self):\n _version = self.version_from_command()\n if not _version:\n raise Exception(\"Niftyreg not found\")\n # Decoding to string:\n _version = _version.decode(\"utf-8\")\n if StrictVersion(_version) < StrictVersion(self._min_version):\n err = \"A later version of Niftyreg is required (%s < %s)\"\n raise ValueError(err % (_version, self._min_version))\n if self.required_version:\n if StrictVersion(_version) != StrictVersion(self.required_version):\n err = \"The version of NiftyReg differs from the required\"\n err += \"(%s != %s)\"\n raise 
ValueError(err % (_version, self.required_version))\n\n @property\n def version(self):\n return self.version_from_command()\n\n def exists(self):\n return self.version_from_command() is not None\n\n def _format_arg(self, name, spec, value):\n if name == \"omp_core_val\":\n self.numthreads = value\n return super(NiftyRegCommand, self)._format_arg(name, spec, value)\n\n def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None):\n if basename == \"\":\n msg = \"Unable to generate filename for command %s. \" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n _, final_bn, final_ext = split_filename(basename)\n if out_dir is None:\n out_dir = os.getcwd()\n if ext is not None:\n final_ext = ext\n if suffix is not None:\n final_bn = \"\".join((final_bn, suffix))\n return os.path.abspath(os.path.join(out_dir, final_bn + final_ext))\n", "path": "nipype/interfaces/niftyreg/base.py"}]} | 3,347 | 545 |
gh_patches_debug_22401 | rasdani/github-patches | git_diff | talonhub__community-244 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Formatters should apply to selected text if they are spoken alone.
For example, saying `yeller` while some text is selected should pass the selected text through the formatter and replace the selection with the formatted result.
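A minimal sketch of one way this could behave, assuming Talon's built-in `edit.selected_text`/`edit.delete`/`insert` actions and the `formatted_text` action from `code/formatters.py` shown below; the helper name and the crude word-splitting are illustrative only, not the actual implementation:

```python
# Sketch only: reformat the current selection with the spoken formatter chain.
from talon import actions

def reformat_selection(formatters: str) -> str:
    selected = actions.edit.selected_text()
    # Crude normalisation for illustration; a real version would also split
    # camelCase identifiers back into words before reformatting.
    unformatted = selected.replace("_", " ").replace("-", " ").lower()
    # Delete first: some applications do not overwrite a selection on insert.
    actions.edit.delete()
    text = actions.user.formatted_text(unformatted, formatters)
    actions.insert(text)
    return text
```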
</issue>
<code>
[start of code/formatters.py]
1 from talon import Module, Context, actions, ui, imgui
2 from talon.grammar import Phrase
3 from typing import List, Union
4
5 ctx = Context()
6 key = actions.key
7
8 words_to_keep_lowercase = "a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor".split(
9 ","
10 )
11
12 # last_phrase has the last phrase spoken, WITHOUT formatting.
13 # This is needed for reformatting.
14 last_phrase = ""
15
16 # formatted_phrase_history keeps the most recent formatted phrases, WITH formatting.
17 formatted_phrase_history = []
18 formatted_phrase_history_length = 20
19
20
21 def surround(by):
22 def func(i, word, last):
23 if i == 0:
24 word = by + word
25 if last:
26 word += by
27 return word
28
29 return func
30
31
32 def format_phrase(m: Union[str, Phrase], fmtrs: str):
33 global last_phrase
34 last_phrase = m
35 words = []
36 if isinstance(m, str):
37 words = m.split(" ")
38 else:
39 if m.words[-1] == "over":
40 m.words = m.words[:-1]
41
42 words = actions.dictate.parse_words(m)
43 words = actions.dictate.replace_words(words)
44
45 result = format_phrase_no_history(words, fmtrs)
46
47 # Add result to history.
48 global formatted_phrase_history
49 formatted_phrase_history.insert(0, result)
50 formatted_phrase_history = formatted_phrase_history[
51 :formatted_phrase_history_length
52 ]
53
54 return result
55
56
57 def format_phrase_no_history(word_list, fmtrs: str):
58 fmtr_list = fmtrs.split(",")
59 words = []
60 spaces = True
61 for i, w in enumerate(word_list):
62 for name in reversed(fmtr_list):
63 smash, func = all_formatters[name]
64 w = func(i, w, i == len(word_list) - 1)
65 spaces = spaces and not smash
66 words.append(w)
67 sep = " " if spaces else ""
68 return sep.join(words)
69
70
71 NOSEP = True
72 SEP = False
73
74
75 def words_with_joiner(joiner):
76 """Pass through words unchanged, but add a separator between them."""
77
78 def formatter_function(i, word, _):
79 return word if i == 0 else joiner + word
80
81 return (NOSEP, formatter_function)
82
83
84 def first_vs_rest(first_func, rest_func=lambda w: w):
85 """Supply one or two transformer functions for the first and rest of
86 words respectively.
87
88 Leave second argument out if you want all but the first word to be passed
89 through unchanged.
90 Set first argument to None if you want the first word to be passed
91 through unchanged."""
92 if first_func is None:
93 first_func = lambda w: w
94
95 def formatter_function(i, word, _):
96 return first_func(word) if i == 0 else rest_func(word)
97
98 return formatter_function
99
100
101 def every_word(word_func):
102 """Apply one function to every word."""
103
104 def formatter_function(i, word, _):
105 return word_func(word)
106
107 return formatter_function
108
109
110 formatters_dict = {
111 "NOOP": (SEP, lambda i, word, _: word),
112 "DOUBLE_UNDERSCORE": (NOSEP, first_vs_rest(lambda w: "__%s__" % w)),
113 "PRIVATE_CAMEL_CASE": (NOSEP, first_vs_rest(lambda w: w, lambda w: w.capitalize())),
114 "PROTECTED_CAMEL_CASE": (
115 NOSEP,
116 first_vs_rest(lambda w: w, lambda w: w.capitalize()),
117 ),
118 "PUBLIC_CAMEL_CASE": (NOSEP, every_word(lambda w: w.capitalize())),
119 "SNAKE_CASE": (
120 NOSEP,
121 first_vs_rest(lambda w: w.lower(), lambda w: "_" + w.lower()),
122 ),
123 "NO_SPACES": (NOSEP, every_word(lambda w: w)),
124 "DASH_SEPARATED": words_with_joiner("-"),
125 "TERMINAL_DASH_SEPARATED": (
126 NOSEP,
127 first_vs_rest(lambda w: " --" + w.lower(), lambda w: "-" + w.lower()),
128 ),
129 "DOUBLE_COLON_SEPARATED": words_with_joiner("::"),
130 "ALL_CAPS": (SEP, every_word(lambda w: w.upper())),
131 "ALL_LOWERCASE": (SEP, every_word(lambda w: w.lower())),
132 "DOUBLE_QUOTED_STRING": (SEP, surround('"')),
133 "SINGLE_QUOTED_STRING": (SEP, surround("'")),
134 "SPACE_SURROUNDED_STRING": (SEP, surround(" ")),
135 "DOT_SEPARATED": words_with_joiner("."),
136 "DOT_SNAKE": (NOSEP, lambda i, word, _: "." + word if i == 0 else "_" + word),
137 "SLASH_SEPARATED": (NOSEP, every_word(lambda w: "/" + w)),
138 "CAPITALIZE_FIRST_WORD": (SEP, first_vs_rest(lambda w: w.capitalize())),
139 "CAPITALIZE_ALL_WORDS": (
140 SEP,
141 lambda i, word, _: word.capitalize()
142 if i == 0 or word not in words_to_keep_lowercase
143 else word,
144 ),
145 "FIRST_THREE": (NOSEP, lambda i, word, _: word[0:3]),
146 "FIRST_FOUR": (NOSEP, lambda i, word, _: word[0:4]),
147 "FIRST_FIVE": (NOSEP, lambda i, word, _: word[0:5]),
148 }
149
150 # This is the mapping from spoken phrases to formatters
151 formatters_words = {
152 "allcaps": formatters_dict["ALL_CAPS"],
153 "alldown": formatters_dict["ALL_LOWERCASE"],
154 "camel": formatters_dict["PRIVATE_CAMEL_CASE"],
155 "dotted": formatters_dict["DOT_SEPARATED"],
156 "dubstring": formatters_dict["DOUBLE_QUOTED_STRING"],
157 "dunder": formatters_dict["DOUBLE_UNDERSCORE"],
158 "hammer": formatters_dict["PUBLIC_CAMEL_CASE"],
159 "kebab": formatters_dict["DASH_SEPARATED"],
160 "packed": formatters_dict["DOUBLE_COLON_SEPARATED"],
161 "padded": formatters_dict["SPACE_SURROUNDED_STRING"],
162 # "say": formatters_dict["NOOP"],
163 "sentence": formatters_dict["CAPITALIZE_FIRST_WORD"],
164 "slasher": formatters_dict["SLASH_SEPARATED"],
165 "smash": formatters_dict["NO_SPACES"],
166 "snake": formatters_dict["SNAKE_CASE"],
167 # "speak": formatters_dict["NOOP"],
168 "string": formatters_dict["SINGLE_QUOTED_STRING"],
169 "title": formatters_dict["CAPITALIZE_ALL_WORDS"],
170 # disable a few formatters for now
171 # "tree": formatters_dict["FIRST_THREE"],
172 # "quad": formatters_dict["FIRST_FOUR"],
173 # "fiver": formatters_dict["FIRST_FIVE"],
174 }
175
176 all_formatters = {}
177 all_formatters.update(formatters_dict)
178 all_formatters.update(formatters_words)
179
180 mod = Module()
181 mod.list("formatters", desc="list of formatters")
182
183
184 @mod.capture
185 def formatters(m) -> str:
186 "Returns a comma-separated string of formatters e.g. 'SNAKE,DUBSTRING'"
187
188
189 @mod.capture
190 def format_text(m) -> str:
191 "Formats the text and returns a string"
192
193
194 @mod.action_class
195 class Actions:
196 def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:
197 """Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')"""
198 return format_phrase(phrase, formatters)
199
200 def formatters_help_toggle():
201 """Lists all formatters"""
202 if gui.showing:
203 gui.hide()
204 else:
205 gui.show()
206
207 def formatters_recent_toggle():
208 """Toggles list of recent formatters"""
209 if recent_gui.showing:
210 recent_gui.hide()
211 else:
212 recent_gui.show()
213
214 def formatters_recent_select(number: int):
215 """Inserts a recent formatter"""
216 if len(formatted_phrase_history) >= number:
217 return formatted_phrase_history[number - 1]
218 return ""
219
220 def formatters_clear_last():
221 """Clears the last formatted phrase"""
222 if len(formatted_phrase_history) > 0:
223 for character in formatted_phrase_history[0]:
224 actions.edit.delete()
225
226 def formatters_reformat_last(formatters: str) -> str:
227 """Reformats last formatted phrase"""
228 global last_phrase
229 return format_phrase(last_phrase, formatters)
230
231
232 @ctx.capture(rule="{self.formatters}+")
233 def formatters(m):
234 return ",".join(m.formatters_list)
235
236
237 @ctx.capture(rule="<self.formatters> <user.text>")
238 def format_text(m):
239 return format_phrase(m.text, m.formatters)
240
241
242 ctx.lists["self.formatters"] = formatters_words.keys()
243
244
245 @imgui.open(software=False)
246 def gui(gui: imgui.GUI):
247 gui.text("List formatters")
248 gui.line()
249 for name in sorted(set(formatters_words.keys())):
250 gui.text(f"{name} | {format_phrase_no_history(['one', 'two', 'three'], name)}")
251
252
253 @imgui.open(software=False)
254 def recent_gui(gui: imgui.GUI):
255 gui.text("Recent formatters")
256 gui.line()
257 for index, result in enumerate(formatted_phrase_history, 1):
258 gui.text("{}. {}".format(index, result))
259
[end of code/formatters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/code/formatters.py b/code/formatters.py
--- a/code/formatters.py
+++ b/code/formatters.py
@@ -1,9 +1,11 @@
from talon import Module, Context, actions, ui, imgui
from talon.grammar import Phrase
from typing import List, Union
+import re
ctx = Context()
key = actions.key
+edit = actions.edit
words_to_keep_lowercase = "a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor".split(
","
@@ -228,6 +230,19 @@
global last_phrase
return format_phrase(last_phrase, formatters)
+ def formatters_reformat_selection(formatters: str) -> str:
+ """Reformats the current selection."""
+ selected = edit.selected_text()
+ unformatted = re.sub(r"[^a-zA-Z0-9]+", " ", selected).lower()
+ # TODO: Separate out camelcase & studleycase vars
+
+ # Delete separately for compatibility with programs that don't overwrite
+ # selected text (e.g. Emacs)
+ edit.delete()
+ text = actions.self.formatted_text(unformatted, formatters)
+ actions.insert(text)
+ return text
+
@ctx.capture(rule="{self.formatters}+")
def formatters(m):
| {"golden_diff": "diff --git a/code/formatters.py b/code/formatters.py\n--- a/code/formatters.py\n+++ b/code/formatters.py\n@@ -1,9 +1,11 @@\n from talon import Module, Context, actions, ui, imgui\n from talon.grammar import Phrase\n from typing import List, Union\n+import re\n \n ctx = Context()\n key = actions.key\n+edit = actions.edit\n \n words_to_keep_lowercase = \"a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor\".split(\n \",\"\n@@ -228,6 +230,19 @@\n global last_phrase\n return format_phrase(last_phrase, formatters)\n \n+ def formatters_reformat_selection(formatters: str) -> str:\n+ \"\"\"Reformats the current selection.\"\"\"\n+ selected = edit.selected_text()\n+ unformatted = re.sub(r\"[^a-zA-Z0-9]+\", \" \", selected).lower()\n+ # TODO: Separate out camelcase & studleycase vars\n+\n+ # Delete separately for compatibility with programs that don't overwrite\n+ # selected text (e.g. Emacs)\n+ edit.delete()\n+ text = actions.self.formatted_text(unformatted, formatters)\n+ actions.insert(text)\n+ return text\n+\n \n @ctx.capture(rule=\"{self.formatters}+\")\n def formatters(m):\n", "issue": "formatters should apply to selected text if they are spoken alone\nfor example `yeller` while some text is selected should pass the selected text through the formatter and replace the selected text with the formatted text.\n", "before_files": [{"content": "from talon import Module, Context, actions, ui, imgui\nfrom talon.grammar import Phrase\nfrom typing import List, Union\n\nctx = Context()\nkey = actions.key\n\nwords_to_keep_lowercase = \"a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor\".split(\n \",\"\n)\n\n# last_phrase has the last phrase spoken, WITHOUT formatting.\n# This is needed for reformatting.\nlast_phrase = \"\"\n\n# formatted_phrase_history keeps the most recent formatted phrases, WITH formatting.\nformatted_phrase_history = []\nformatted_phrase_history_length = 20\n\n\ndef surround(by):\n def func(i, word, last):\n if i == 0:\n word = by + word\n if last:\n word += by\n return word\n\n return func\n\n\ndef format_phrase(m: Union[str, Phrase], fmtrs: str):\n global last_phrase\n last_phrase = m\n words = []\n if isinstance(m, str):\n words = m.split(\" \")\n else:\n if m.words[-1] == \"over\":\n m.words = m.words[:-1]\n\n words = actions.dictate.parse_words(m)\n words = actions.dictate.replace_words(words)\n\n result = format_phrase_no_history(words, fmtrs)\n\n # Add result to history.\n global formatted_phrase_history\n formatted_phrase_history.insert(0, result)\n formatted_phrase_history = formatted_phrase_history[\n :formatted_phrase_history_length\n ]\n\n return result\n\n\ndef format_phrase_no_history(word_list, fmtrs: str):\n fmtr_list = fmtrs.split(\",\")\n words = []\n spaces = True\n for i, w in enumerate(word_list):\n for name in reversed(fmtr_list):\n smash, func = all_formatters[name]\n w = func(i, w, i == len(word_list) - 1)\n spaces = spaces and not smash\n words.append(w)\n sep = \" \" if spaces else \"\"\n return sep.join(words)\n\n\nNOSEP = True\nSEP = False\n\n\ndef words_with_joiner(joiner):\n \"\"\"Pass through words unchanged, but add a separator between them.\"\"\"\n\n def formatter_function(i, word, _):\n return word if i == 0 else joiner + word\n\n return (NOSEP, formatter_function)\n\n\ndef first_vs_rest(first_func, rest_func=lambda w: w):\n \"\"\"Supply one or two transformer functions for the first and rest of\n words respectively.\n\n Leave second argument out if you want all but the first word to be passed\n through unchanged.\n Set first 
argument to None if you want the first word to be passed\n through unchanged.\"\"\"\n if first_func is None:\n first_func = lambda w: w\n\n def formatter_function(i, word, _):\n return first_func(word) if i == 0 else rest_func(word)\n\n return formatter_function\n\n\ndef every_word(word_func):\n \"\"\"Apply one function to every word.\"\"\"\n\n def formatter_function(i, word, _):\n return word_func(word)\n\n return formatter_function\n\n\nformatters_dict = {\n \"NOOP\": (SEP, lambda i, word, _: word),\n \"DOUBLE_UNDERSCORE\": (NOSEP, first_vs_rest(lambda w: \"__%s__\" % w)),\n \"PRIVATE_CAMEL_CASE\": (NOSEP, first_vs_rest(lambda w: w, lambda w: w.capitalize())),\n \"PROTECTED_CAMEL_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w, lambda w: w.capitalize()),\n ),\n \"PUBLIC_CAMEL_CASE\": (NOSEP, every_word(lambda w: w.capitalize())),\n \"SNAKE_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w.lower(), lambda w: \"_\" + w.lower()),\n ),\n \"NO_SPACES\": (NOSEP, every_word(lambda w: w)),\n \"DASH_SEPARATED\": words_with_joiner(\"-\"),\n \"TERMINAL_DASH_SEPARATED\": (\n NOSEP,\n first_vs_rest(lambda w: \" --\" + w.lower(), lambda w: \"-\" + w.lower()),\n ),\n \"DOUBLE_COLON_SEPARATED\": words_with_joiner(\"::\"),\n \"ALL_CAPS\": (SEP, every_word(lambda w: w.upper())),\n \"ALL_LOWERCASE\": (SEP, every_word(lambda w: w.lower())),\n \"DOUBLE_QUOTED_STRING\": (SEP, surround('\"')),\n \"SINGLE_QUOTED_STRING\": (SEP, surround(\"'\")),\n \"SPACE_SURROUNDED_STRING\": (SEP, surround(\" \")),\n \"DOT_SEPARATED\": words_with_joiner(\".\"),\n \"DOT_SNAKE\": (NOSEP, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"SLASH_SEPARATED\": (NOSEP, every_word(lambda w: \"/\" + w)),\n \"CAPITALIZE_FIRST_WORD\": (SEP, first_vs_rest(lambda w: w.capitalize())),\n \"CAPITALIZE_ALL_WORDS\": (\n SEP,\n lambda i, word, _: word.capitalize()\n if i == 0 or word not in words_to_keep_lowercase\n else word,\n ),\n \"FIRST_THREE\": (NOSEP, lambda i, word, _: word[0:3]),\n \"FIRST_FOUR\": (NOSEP, lambda i, word, _: word[0:4]),\n \"FIRST_FIVE\": (NOSEP, lambda i, word, _: word[0:5]),\n}\n\n# This is the mapping from spoken phrases to formatters\nformatters_words = {\n \"allcaps\": formatters_dict[\"ALL_CAPS\"],\n \"alldown\": formatters_dict[\"ALL_LOWERCASE\"],\n \"camel\": formatters_dict[\"PRIVATE_CAMEL_CASE\"],\n \"dotted\": formatters_dict[\"DOT_SEPARATED\"],\n \"dubstring\": formatters_dict[\"DOUBLE_QUOTED_STRING\"],\n \"dunder\": formatters_dict[\"DOUBLE_UNDERSCORE\"],\n \"hammer\": formatters_dict[\"PUBLIC_CAMEL_CASE\"],\n \"kebab\": formatters_dict[\"DASH_SEPARATED\"],\n \"packed\": formatters_dict[\"DOUBLE_COLON_SEPARATED\"],\n \"padded\": formatters_dict[\"SPACE_SURROUNDED_STRING\"],\n # \"say\": formatters_dict[\"NOOP\"],\n \"sentence\": formatters_dict[\"CAPITALIZE_FIRST_WORD\"],\n \"slasher\": formatters_dict[\"SLASH_SEPARATED\"],\n \"smash\": formatters_dict[\"NO_SPACES\"],\n \"snake\": formatters_dict[\"SNAKE_CASE\"],\n # \"speak\": formatters_dict[\"NOOP\"],\n \"string\": formatters_dict[\"SINGLE_QUOTED_STRING\"],\n \"title\": formatters_dict[\"CAPITALIZE_ALL_WORDS\"],\n # disable a few formatters for now\n # \"tree\": formatters_dict[\"FIRST_THREE\"],\n # \"quad\": formatters_dict[\"FIRST_FOUR\"],\n # \"fiver\": formatters_dict[\"FIRST_FIVE\"],\n}\n\nall_formatters = {}\nall_formatters.update(formatters_dict)\nall_formatters.update(formatters_words)\n\nmod = Module()\nmod.list(\"formatters\", desc=\"list of formatters\")\n\n\[email protected]\ndef formatters(m) -> str:\n \"Returns a comma-separated 
string of formatters e.g. 'SNAKE,DUBSTRING'\"\n\n\[email protected]\ndef format_text(m) -> str:\n \"Formats the text and returns a string\"\n\n\[email protected]_class\nclass Actions:\n def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:\n \"\"\"Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')\"\"\"\n return format_phrase(phrase, formatters)\n\n def formatters_help_toggle():\n \"\"\"Lists all formatters\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def formatters_recent_toggle():\n \"\"\"Toggles list of recent formatters\"\"\"\n if recent_gui.showing:\n recent_gui.hide()\n else:\n recent_gui.show()\n\n def formatters_recent_select(number: int):\n \"\"\"Inserts a recent formatter\"\"\"\n if len(formatted_phrase_history) >= number:\n return formatted_phrase_history[number - 1]\n return \"\"\n\n def formatters_clear_last():\n \"\"\"Clears the last formatted phrase\"\"\"\n if len(formatted_phrase_history) > 0:\n for character in formatted_phrase_history[0]:\n actions.edit.delete()\n\n def formatters_reformat_last(formatters: str) -> str:\n \"\"\"Reformats last formatted phrase\"\"\"\n global last_phrase\n return format_phrase(last_phrase, formatters)\n\n\[email protected](rule=\"{self.formatters}+\")\ndef formatters(m):\n return \",\".join(m.formatters_list)\n\n\[email protected](rule=\"<self.formatters> <user.text>\")\ndef format_text(m):\n return format_phrase(m.text, m.formatters)\n\n\nctx.lists[\"self.formatters\"] = formatters_words.keys()\n\n\[email protected](software=False)\ndef gui(gui: imgui.GUI):\n gui.text(\"List formatters\")\n gui.line()\n for name in sorted(set(formatters_words.keys())):\n gui.text(f\"{name} | {format_phrase_no_history(['one', 'two', 'three'], name)}\")\n\n\[email protected](software=False)\ndef recent_gui(gui: imgui.GUI):\n gui.text(\"Recent formatters\")\n gui.line()\n for index, result in enumerate(formatted_phrase_history, 1):\n gui.text(\"{}. {}\".format(index, result))\n", "path": "code/formatters.py"}]} | 3,330 | 303 |
gh_patches_debug_2926 | rasdani/github-patches | git_diff | Mailu__Mailu-2116 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error 404 not found when opening admin after upgrade 1.8 to master
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [X] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
### Versions
Before upgrade: Docker 1.8 images.
After upgrade: Docker master images (pulled 30 December 2021).
## Description
**Mailu 1.8** image redirects `/admin` to `/admin/ui`.
**Mailu master** image no longer serves `/admin/ui`, because the `ui` part of the URL was removed according to the towncrier newsfragment [1929.enhancement](https://github.com/Mailu/Mailu/blob/master/towncrier/newsfragments/1929.enhancement):
> Removed the /admin/ prefix to reduce complexity of routing with Mailu. Admin is accessible directly via /admin instead of /admin/ui
After the upgrade from `1.8` to `master` and visiting the admin page, the browser still uses the cached URL `/admin/ui` and results in 404 not found.
## Replication Steps
1. Create 1.8 production environment on AMD64 platform using `mailu 1.8 Docker images`.
2. Make sure the Admin page works.
3. Remove docker containers (`docker-compose down`).
4. Recreate **all** containers at the same time using `mailu master Docker images`.
5. Open root mail domain. The browser uses the cached URL `admin/ui` and shows Error 404 not found.
Note: Tested with `TLS_FLAVOR=letsencrypt`, admin and roundcube and Firefox.
## Expected behaviour
Backwards compatibility after the Mailu 1.8 upgrade, without requiring users to clear their browser caches.
## Front log
```
front_1 | <IP> - - [30/Dec/2021:10:14:35 +0000] "GET /admin/ui/ HTTP/2.0" 404 198 "https://mail.mydomain.nl/sso/login" "Mozilla/5.0 (X11; Linux x86_64; rv:95.0) Gecko/20100101 Firefox/95.0"
```
## Bugfix
The proposal is to always redirect `/admin/ui` to `/admin`, to prevent browser-caching problems after the upgrade.
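A minimal sketch of such a compatibility route, assuming it lives in `core/admin/mailu/ui/views/base.py` next to the existing `index` view shown below (the admin app is mounted under `/admin`, so a blueprint route for `/ui/` covers the old `/admin/ui` URL, and a 301 lets browsers update the cached location):

```python
# Sketch only: permanent redirect from the old /admin/ui prefix to /admin.
@ui.route('/ui/', methods=['GET'])
def redirect_old_ui():
    return flask.redirect(flask.url_for('.index'), code=301)
```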
</issue>
<code>
[start of core/admin/mailu/ui/views/base.py]
1 from mailu import models, utils
2 from mailu.ui import ui, forms, access
3
4 from flask import current_app as app
5 import flask
6 import flask_login
7
8
9 @ui.route('/', methods=["GET"])
10 @access.authenticated
11 def index():
12 return flask.redirect(flask.url_for('.user_settings'))
13
14 @ui.route('/announcement', methods=['GET', 'POST'])
15 @access.global_admin
16 def announcement():
17 form = forms.AnnouncementForm()
18 if form.validate_on_submit():
19 for user in models.User.query.all():
20 user.sendmail(form.announcement_subject.data,
21 form.announcement_body.data)
22 # Force-empty the form
23 form.announcement_subject.data = ''
24 form.announcement_body.data = ''
25 flask.flash('Your announcement was sent', 'success')
26 return flask.render_template('announcement.html', form=form)
27
28 @ui.route('/webmail', methods=['GET'])
29 def webmail():
30 return flask.redirect(app.config['WEB_WEBMAIL'])
31
32 @ui.route('/client', methods=['GET'])
33 def client():
34 return flask.render_template('client.html')
35
36 @ui.route('/webui_antispam', methods=['GET'])
37 def antispam():
38 return flask.render_template('antispam.html')
39
[end of core/admin/mailu/ui/views/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/core/admin/mailu/ui/views/base.py b/core/admin/mailu/ui/views/base.py
--- a/core/admin/mailu/ui/views/base.py
+++ b/core/admin/mailu/ui/views/base.py
@@ -11,6 +11,10 @@
def index():
return flask.redirect(flask.url_for('.user_settings'))
[email protected]('/ui/')
+def redirect_old_path():
+ return flask.redirect(flask.url_for('.index'), code=301)
+
@ui.route('/announcement', methods=['GET', 'POST'])
@access.global_admin
def announcement():
| {"golden_diff": "diff --git a/core/admin/mailu/ui/views/base.py b/core/admin/mailu/ui/views/base.py\n--- a/core/admin/mailu/ui/views/base.py\n+++ b/core/admin/mailu/ui/views/base.py\n@@ -11,6 +11,10 @@\n def index():\n return flask.redirect(flask.url_for('.user_settings'))\n \[email protected]('/ui/')\n+def redirect_old_path():\n+ return flask.redirect(flask.url_for('.index'), code=301)\n+\n @ui.route('/announcement', methods=['GET', 'POST'])\n @access.global_admin\n def announcement():\n", "issue": "Error 404 not found when opening admin after upgrade 1.8 to master\n## Before you open your issue\r\n- [X] Check if no issue or pull-request for this already exists.\r\n- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [X] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [X] docker-compose\r\n\r\n### Versions\r\n\r\nBefore upgrade: Docker 1.8 images.\r\nAfter upgrade: Docker master images (pulled 30 December 2021).\r\n\r\n## Description\r\n\r\n**Mailu 1.8** image redirects `/admin` to `/admin/ui`.\r\n\r\n**Mailu master** image no longer redirects `/admin/ui` as the `ui` part in the URL has been removed according to [Tomcat 1929.enhacement](https://github.com/Mailu/Mailu/blob/master/towncrier/newsfragments/1929.enhancement):\r\n\r\n> Removed the /admin/ prefix to reduce complexity of routing with Mailu. Admin is accessible directly via /admin instead of /admin/ui\r\n\r\nAfter the upgrade from `1.8` to `master` and visiting the admin page, the browser still uses the cached URL `/admin/ui` and results in 404 not found.\r\n\r\n\r\n## Replication Steps\r\n\r\n1. Create 1.8 production environment on AMD64 platform using `mailu 1.8 Docker images`.\r\n2. Make sure the Admin page works.\r\n3. Remove docker containers (`docker-compose down`).\r\n4. Recreate **all** containers at the same time using `mailu master Docker images`.\r\n5. Open root mail domain. 
The browser uses the cached URL `admin/ui` and shows Error 404 not found.\r\n\r\nNote: Tested with `TLS_FLAVOR=letsencrypt`, admin and roundcube and Firefox.\r\n\r\n\r\n## Expected behaviour\r\n\r\nBackwards compatibility after Mailu 1.8 upgrade without the need of removing browser caches.\r\n\r\n## Front log\r\n\r\n```\r\nfront_1 | <IP> - - [30/Dec/2021:10:14:35 +0000] \"GET /admin/ui/ HTTP/2.0\" 404 198 \"https://mail.mydomain.nl/sso/login\" \"Mozilla/5.0 (X11; Linux x86_64; rv:95.0) Gecko/20100101 Firefox/95.0\"\r\n```\r\n\r\n## Bugfix\r\n\r\nProposal is to redirect `/admin/ui` always to `/admin` to prevent browser caching problems after the upgrade.\n", "before_files": [{"content": "from mailu import models, utils\nfrom mailu.ui import ui, forms, access\n\nfrom flask import current_app as app\nimport flask\nimport flask_login\n\n\[email protected]('/', methods=[\"GET\"])\[email protected]\ndef index():\n return flask.redirect(flask.url_for('.user_settings'))\n\[email protected]('/announcement', methods=['GET', 'POST'])\[email protected]_admin\ndef announcement():\n form = forms.AnnouncementForm()\n if form.validate_on_submit():\n for user in models.User.query.all():\n user.sendmail(form.announcement_subject.data,\n form.announcement_body.data)\n # Force-empty the form\n form.announcement_subject.data = ''\n form.announcement_body.data = ''\n flask.flash('Your announcement was sent', 'success')\n return flask.render_template('announcement.html', form=form)\n\[email protected]('/webmail', methods=['GET'])\ndef webmail():\n return flask.redirect(app.config['WEB_WEBMAIL'])\n\[email protected]('/client', methods=['GET'])\ndef client():\n return flask.render_template('client.html')\n\[email protected]('/webui_antispam', methods=['GET'])\ndef antispam():\n return flask.render_template('antispam.html')\n", "path": "core/admin/mailu/ui/views/base.py"}]} | 1,509 | 126 |
gh_patches_debug_15945 | rasdani/github-patches | git_diff | vyperlang__vyper-2059 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Runtime error when making an external call to the same contract
This code makes it an error to make an external call to the same contract:
https://github.com/ethereum/vyper/blob/c296b2d7532d913103aad494b749f8179a3acddc/vyper/parser/external_call.py#L75
This is a surprising limitation. It doesn't seem to have a clear benefit, and it could be problematic. As an example, multisig wallets often use self-calls to perform administrative functions. This way the wallet owners have to agree to make a change like lowering the required threshold of signatures. In Vyper, this would produce a runtime error.
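For illustration, the rejected pattern looks roughly like the sketch below. This is only a sketch: the interface/decorator syntax follows newer Vyper and may need adjusting for the compiler version in use, and the contract and method names are made up. Calls written literally against `self`, as in `Wallet(self).lower_threshold(...)`, are rejected at compile time by the `StructureException("External calls to self are not permitted.")` in the code shown below, while calls through a stored address that happens to equal the contract's own address fail at run time because of the `["assert", ["ne", "address", contract_address]]` check.

```python
# Sketch only: a wallet that routes an owner-approved admin action back
# through an external call to itself.
from vyper import compiler

SOURCE = """
interface Wallet:
    def lower_threshold(new_threshold: uint256): nonpayable

threshold: public(uint256)

@external
def lower_threshold(new_threshold: uint256):
    assert msg.sender == self
    self.threshold = new_threshold

@external
def execute(new_threshold: uint256):
    # admin change approved by the owners, executed via a self-call
    Wallet(self).lower_threshold(new_threshold)
"""

compiler.compile_code(SOURCE)  # raises the StructureException described above
```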
</issue>
<code>
[start of vyper/parser/external_call.py]
1 from vyper import ast as vy_ast
2 from vyper.exceptions import (
3 StateAccessViolation,
4 StructureException,
5 TypeCheckFailure,
6 )
7 from vyper.parser.lll_node import LLLnode
8 from vyper.parser.parser_utils import getpos, pack_arguments, unwrap_location
9 from vyper.types import (
10 BaseType,
11 ByteArrayLike,
12 ListType,
13 TupleLike,
14 get_size_of_type,
15 )
16
17
18 def external_call(node, context, interface_name, contract_address, pos, value=None, gas=None):
19 from vyper.parser.expr import Expr
20
21 if value is None:
22 value = 0
23 if gas is None:
24 gas = "gas"
25 if contract_address.value == "address":
26 raise StructureException("External calls to self are not permitted.", node)
27 method_name = node.func.attr
28 sig = context.sigs[interface_name][method_name]
29 inargs, inargsize, _ = pack_arguments(
30 sig, [Expr(arg, context).lll_node for arg in node.args], context, node.func,
31 )
32 output_placeholder, output_size, returner = get_external_call_output(sig, context)
33 sub = [
34 "seq",
35 ["assert", ["extcodesize", contract_address]],
36 ["assert", ["ne", "address", contract_address]],
37 ]
38 if context.is_constant() and not sig.const:
39 # TODO this can probably go
40 raise StateAccessViolation(
41 f"May not call state modifying function '{method_name}' "
42 f"within {context.pp_constancy()}.",
43 node,
44 )
45
46 if context.is_constant() or sig.const:
47 sub.append(
48 [
49 "assert",
50 [
51 "staticcall",
52 gas,
53 contract_address,
54 inargs,
55 inargsize,
56 output_placeholder,
57 output_size,
58 ],
59 ]
60 )
61 else:
62 sub.append(
63 [
64 "assert",
65 [
66 "call",
67 gas,
68 contract_address,
69 value,
70 inargs,
71 inargsize,
72 output_placeholder,
73 output_size,
74 ],
75 ]
76 )
77 sub.extend(returner)
78 o = LLLnode.from_list(sub, typ=sig.output_type, location="memory", pos=getpos(node))
79 return o
80
81
82 def get_external_call_output(sig, context):
83 if not sig.output_type:
84 return 0, 0, []
85 output_placeholder = context.new_placeholder(typ=sig.output_type)
86 output_size = get_size_of_type(sig.output_type) * 32
87 if isinstance(sig.output_type, BaseType):
88 returner = [0, output_placeholder]
89 elif isinstance(sig.output_type, ByteArrayLike):
90 returner = [0, output_placeholder + 32]
91 elif isinstance(sig.output_type, TupleLike):
92 returner = [0, output_placeholder]
93 elif isinstance(sig.output_type, ListType):
94 returner = [0, output_placeholder]
95 else:
96 raise TypeCheckFailure(f"Invalid output type: {sig.output_type}")
97 return output_placeholder, output_size, returner
98
99
100 def get_external_interface_keywords(stmt_expr, context):
101 from vyper.parser.expr import Expr
102
103 value, gas = None, None
104 for kw in stmt_expr.keywords:
105 if kw.arg == "gas":
106 gas = Expr.parse_value_expr(kw.value, context)
107 elif kw.arg == "value":
108 value = Expr.parse_value_expr(kw.value, context)
109 else:
110 raise TypeCheckFailure("Unexpected keyword argument")
111 return value, gas
112
113
114 def make_external_call(stmt_expr, context):
115 from vyper.parser.expr import Expr
116
117 value, gas = get_external_interface_keywords(stmt_expr, context)
118
119 if isinstance(stmt_expr.func, vy_ast.Attribute) and isinstance(
120 stmt_expr.func.value, vy_ast.Call
121 ):
122 contract_name = stmt_expr.func.value.func.id
123 contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context)
124
125 return external_call(
126 stmt_expr,
127 context,
128 contract_name,
129 contract_address,
130 pos=getpos(stmt_expr),
131 value=value,
132 gas=gas,
133 )
134
135 elif (
136 isinstance(stmt_expr.func.value, vy_ast.Attribute)
137 and stmt_expr.func.value.attr in context.sigs
138 ): # noqa: E501
139 contract_name = stmt_expr.func.value.attr
140 var = context.globals[stmt_expr.func.value.attr]
141 contract_address = unwrap_location(
142 LLLnode.from_list(
143 var.pos,
144 typ=var.typ,
145 location="storage",
146 pos=getpos(stmt_expr),
147 annotation="self." + stmt_expr.func.value.attr,
148 )
149 )
150
151 return external_call(
152 stmt_expr,
153 context,
154 contract_name,
155 contract_address,
156 pos=getpos(stmt_expr),
157 value=value,
158 gas=gas,
159 )
160
161 elif (
162 isinstance(stmt_expr.func.value, vy_ast.Attribute)
163 and stmt_expr.func.value.attr in context.globals
164 and hasattr(context.globals[stmt_expr.func.value.attr].typ, "name")
165 ):
166
167 contract_name = context.globals[stmt_expr.func.value.attr].typ.name
168 var = context.globals[stmt_expr.func.value.attr]
169 contract_address = unwrap_location(
170 LLLnode.from_list(
171 var.pos,
172 typ=var.typ,
173 location="storage",
174 pos=getpos(stmt_expr),
175 annotation="self." + stmt_expr.func.value.attr,
176 )
177 )
178
179 return external_call(
180 stmt_expr,
181 context,
182 contract_name,
183 contract_address,
184 pos=getpos(stmt_expr),
185 value=value,
186 gas=gas,
187 )
188
189 else:
190 raise StructureException("Unsupported operator.", stmt_expr)
191
[end of vyper/parser/external_call.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/vyper/parser/external_call.py b/vyper/parser/external_call.py
--- a/vyper/parser/external_call.py
+++ b/vyper/parser/external_call.py
@@ -22,8 +22,7 @@
value = 0
if gas is None:
gas = "gas"
- if contract_address.value == "address":
- raise StructureException("External calls to self are not permitted.", node)
+
method_name = node.func.attr
sig = context.sigs[interface_name][method_name]
inargs, inargsize, _ = pack_arguments(
@@ -33,7 +32,6 @@
sub = [
"seq",
["assert", ["extcodesize", contract_address]],
- ["assert", ["ne", "address", contract_address]],
]
if context.is_constant() and not sig.const:
# TODO this can probably go
| {"golden_diff": "diff --git a/vyper/parser/external_call.py b/vyper/parser/external_call.py\n--- a/vyper/parser/external_call.py\n+++ b/vyper/parser/external_call.py\n@@ -22,8 +22,7 @@\n value = 0\n if gas is None:\n gas = \"gas\"\n- if contract_address.value == \"address\":\n- raise StructureException(\"External calls to self are not permitted.\", node)\n+\n method_name = node.func.attr\n sig = context.sigs[interface_name][method_name]\n inargs, inargsize, _ = pack_arguments(\n@@ -33,7 +32,6 @@\n sub = [\n \"seq\",\n [\"assert\", [\"extcodesize\", contract_address]],\n- [\"assert\", [\"ne\", \"address\", contract_address]],\n ]\n if context.is_constant() and not sig.const:\n # TODO this can probably go\n", "issue": "Runtime error when making an external call to the same contract\nThis code makes it an error to make an external call to the same contract:\r\n\r\nhttps://github.com/ethereum/vyper/blob/c296b2d7532d913103aad494b749f8179a3acddc/vyper/parser/external_call.py#L75\r\n\r\nThis is a surprising limitation. It doesn't seem to have a clear benefit, and it could be problematic. As an example, multisig wallets often use self-calls to perform administrative functions. This way the wallet owners have to agree to make a change like lowering the required threshold of signatures. In Vyper, this would produce a runtime error.\n", "before_files": [{"content": "from vyper import ast as vy_ast\nfrom vyper.exceptions import (\n StateAccessViolation,\n StructureException,\n TypeCheckFailure,\n)\nfrom vyper.parser.lll_node import LLLnode\nfrom vyper.parser.parser_utils import getpos, pack_arguments, unwrap_location\nfrom vyper.types import (\n BaseType,\n ByteArrayLike,\n ListType,\n TupleLike,\n get_size_of_type,\n)\n\n\ndef external_call(node, context, interface_name, contract_address, pos, value=None, gas=None):\n from vyper.parser.expr import Expr\n\n if value is None:\n value = 0\n if gas is None:\n gas = \"gas\"\n if contract_address.value == \"address\":\n raise StructureException(\"External calls to self are not permitted.\", node)\n method_name = node.func.attr\n sig = context.sigs[interface_name][method_name]\n inargs, inargsize, _ = pack_arguments(\n sig, [Expr(arg, context).lll_node for arg in node.args], context, node.func,\n )\n output_placeholder, output_size, returner = get_external_call_output(sig, context)\n sub = [\n \"seq\",\n [\"assert\", [\"extcodesize\", contract_address]],\n [\"assert\", [\"ne\", \"address\", contract_address]],\n ]\n if context.is_constant() and not sig.const:\n # TODO this can probably go\n raise StateAccessViolation(\n f\"May not call state modifying function '{method_name}' \"\n f\"within {context.pp_constancy()}.\",\n node,\n )\n\n if context.is_constant() or sig.const:\n sub.append(\n [\n \"assert\",\n [\n \"staticcall\",\n gas,\n contract_address,\n inargs,\n inargsize,\n output_placeholder,\n output_size,\n ],\n ]\n )\n else:\n sub.append(\n [\n \"assert\",\n [\n \"call\",\n gas,\n contract_address,\n value,\n inargs,\n inargsize,\n output_placeholder,\n output_size,\n ],\n ]\n )\n sub.extend(returner)\n o = LLLnode.from_list(sub, typ=sig.output_type, location=\"memory\", pos=getpos(node))\n return o\n\n\ndef get_external_call_output(sig, context):\n if not sig.output_type:\n return 0, 0, []\n output_placeholder = context.new_placeholder(typ=sig.output_type)\n output_size = get_size_of_type(sig.output_type) * 32\n if isinstance(sig.output_type, BaseType):\n returner = [0, output_placeholder]\n elif isinstance(sig.output_type, ByteArrayLike):\n 
returner = [0, output_placeholder + 32]\n elif isinstance(sig.output_type, TupleLike):\n returner = [0, output_placeholder]\n elif isinstance(sig.output_type, ListType):\n returner = [0, output_placeholder]\n else:\n raise TypeCheckFailure(f\"Invalid output type: {sig.output_type}\")\n return output_placeholder, output_size, returner\n\n\ndef get_external_interface_keywords(stmt_expr, context):\n from vyper.parser.expr import Expr\n\n value, gas = None, None\n for kw in stmt_expr.keywords:\n if kw.arg == \"gas\":\n gas = Expr.parse_value_expr(kw.value, context)\n elif kw.arg == \"value\":\n value = Expr.parse_value_expr(kw.value, context)\n else:\n raise TypeCheckFailure(\"Unexpected keyword argument\")\n return value, gas\n\n\ndef make_external_call(stmt_expr, context):\n from vyper.parser.expr import Expr\n\n value, gas = get_external_interface_keywords(stmt_expr, context)\n\n if isinstance(stmt_expr.func, vy_ast.Attribute) and isinstance(\n stmt_expr.func.value, vy_ast.Call\n ):\n contract_name = stmt_expr.func.value.func.id\n contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context)\n\n return external_call(\n stmt_expr,\n context,\n contract_name,\n contract_address,\n pos=getpos(stmt_expr),\n value=value,\n gas=gas,\n )\n\n elif (\n isinstance(stmt_expr.func.value, vy_ast.Attribute)\n and stmt_expr.func.value.attr in context.sigs\n ): # noqa: E501\n contract_name = stmt_expr.func.value.attr\n var = context.globals[stmt_expr.func.value.attr]\n contract_address = unwrap_location(\n LLLnode.from_list(\n var.pos,\n typ=var.typ,\n location=\"storage\",\n pos=getpos(stmt_expr),\n annotation=\"self.\" + stmt_expr.func.value.attr,\n )\n )\n\n return external_call(\n stmt_expr,\n context,\n contract_name,\n contract_address,\n pos=getpos(stmt_expr),\n value=value,\n gas=gas,\n )\n\n elif (\n isinstance(stmt_expr.func.value, vy_ast.Attribute)\n and stmt_expr.func.value.attr in context.globals\n and hasattr(context.globals[stmt_expr.func.value.attr].typ, \"name\")\n ):\n\n contract_name = context.globals[stmt_expr.func.value.attr].typ.name\n var = context.globals[stmt_expr.func.value.attr]\n contract_address = unwrap_location(\n LLLnode.from_list(\n var.pos,\n typ=var.typ,\n location=\"storage\",\n pos=getpos(stmt_expr),\n annotation=\"self.\" + stmt_expr.func.value.attr,\n )\n )\n\n return external_call(\n stmt_expr,\n context,\n contract_name,\n contract_address,\n pos=getpos(stmt_expr),\n value=value,\n gas=gas,\n )\n\n else:\n raise StructureException(\"Unsupported operator.\", stmt_expr)\n", "path": "vyper/parser/external_call.py"}]} | 2,375 | 198 |
gh_patches_debug_36714 | rasdani/github-patches | git_diff | pytorch__vision-2142 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Resume in the Segmentation example
## 🐛 Bug
The segmentation training [script](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py) doesn't seem to allow correct resuming. I think [lines 131-133](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py#L131) should be changed to:
```
start_epoch = 0
if args.resume:
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
start_epoch = checkpoint['epoch'] + 1
```
Then, [line 161](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py#L161) and [lines 167-173](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py#L167) should be changed respectively to:
```
for epoch in range(start_epoch, args.epochs):
```
and
```
utils.save_on_master(
{
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch
},
```
Besides, is there a good reason to set [`batch_size=1`](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py#L120) (instead of `args.batch_size` or even `2*args.batch_size`) for `data_loader_test`?
Thanks.
</issue>
<code>
[start of references/segmentation/train.py]
1 import datetime
2 import os
3 import time
4
5 import torch
6 import torch.utils.data
7 from torch import nn
8 import torchvision
9
10 from coco_utils import get_coco
11 import transforms as T
12 import utils
13
14
15 def get_dataset(name, image_set, transform):
16 def sbd(*args, **kwargs):
17 return torchvision.datasets.SBDataset(*args, mode='segmentation', **kwargs)
18 paths = {
19 "voc": ('/datasets01/VOC/060817/', torchvision.datasets.VOCSegmentation, 21),
20 "voc_aug": ('/datasets01/SBDD/072318/', sbd, 21),
21 "coco": ('/datasets01/COCO/022719/', get_coco, 21)
22 }
23 p, ds_fn, num_classes = paths[name]
24
25 ds = ds_fn(p, image_set=image_set, transforms=transform)
26 return ds, num_classes
27
28
29 def get_transform(train):
30 base_size = 520
31 crop_size = 480
32
33 min_size = int((0.5 if train else 1.0) * base_size)
34 max_size = int((2.0 if train else 1.0) * base_size)
35 transforms = []
36 transforms.append(T.RandomResize(min_size, max_size))
37 if train:
38 transforms.append(T.RandomHorizontalFlip(0.5))
39 transforms.append(T.RandomCrop(crop_size))
40 transforms.append(T.ToTensor())
41 transforms.append(T.Normalize(mean=[0.485, 0.456, 0.406],
42 std=[0.229, 0.224, 0.225]))
43
44 return T.Compose(transforms)
45
46
47 def criterion(inputs, target):
48 losses = {}
49 for name, x in inputs.items():
50 losses[name] = nn.functional.cross_entropy(x, target, ignore_index=255)
51
52 if len(losses) == 1:
53 return losses['out']
54
55 return losses['out'] + 0.5 * losses['aux']
56
57
58 def evaluate(model, data_loader, device, num_classes):
59 model.eval()
60 confmat = utils.ConfusionMatrix(num_classes)
61 metric_logger = utils.MetricLogger(delimiter=" ")
62 header = 'Test:'
63 with torch.no_grad():
64 for image, target in metric_logger.log_every(data_loader, 100, header):
65 image, target = image.to(device), target.to(device)
66 output = model(image)
67 output = output['out']
68
69 confmat.update(target.flatten(), output.argmax(1).flatten())
70
71 confmat.reduce_from_all_processes()
72
73 return confmat
74
75
76 def train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, print_freq):
77 model.train()
78 metric_logger = utils.MetricLogger(delimiter=" ")
79 metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))
80 header = 'Epoch: [{}]'.format(epoch)
81 for image, target in metric_logger.log_every(data_loader, print_freq, header):
82 image, target = image.to(device), target.to(device)
83 output = model(image)
84 loss = criterion(output, target)
85
86 optimizer.zero_grad()
87 loss.backward()
88 optimizer.step()
89
90 lr_scheduler.step()
91
92 metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
93
94
95 def main(args):
96 if args.output_dir:
97 utils.mkdir(args.output_dir)
98
99 utils.init_distributed_mode(args)
100 print(args)
101
102 device = torch.device(args.device)
103
104 dataset, num_classes = get_dataset(args.dataset, "train", get_transform(train=True))
105 dataset_test, _ = get_dataset(args.dataset, "val", get_transform(train=False))
106
107 if args.distributed:
108 train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
109 test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
110 else:
111 train_sampler = torch.utils.data.RandomSampler(dataset)
112 test_sampler = torch.utils.data.SequentialSampler(dataset_test)
113
114 data_loader = torch.utils.data.DataLoader(
115 dataset, batch_size=args.batch_size,
116 sampler=train_sampler, num_workers=args.workers,
117 collate_fn=utils.collate_fn, drop_last=True)
118
119 data_loader_test = torch.utils.data.DataLoader(
120 dataset_test, batch_size=1,
121 sampler=test_sampler, num_workers=args.workers,
122 collate_fn=utils.collate_fn)
123
124 model = torchvision.models.segmentation.__dict__[args.model](num_classes=num_classes,
125 aux_loss=args.aux_loss,
126 pretrained=args.pretrained)
127 model.to(device)
128 if args.distributed:
129 model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
130
131 if args.resume:
132 checkpoint = torch.load(args.resume, map_location='cpu')
133 model.load_state_dict(checkpoint['model'])
134
135 model_without_ddp = model
136 if args.distributed:
137 model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
138 model_without_ddp = model.module
139
140 if args.test_only:
141 confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)
142 print(confmat)
143 return
144
145 params_to_optimize = [
146 {"params": [p for p in model_without_ddp.backbone.parameters() if p.requires_grad]},
147 {"params": [p for p in model_without_ddp.classifier.parameters() if p.requires_grad]},
148 ]
149 if args.aux_loss:
150 params = [p for p in model_without_ddp.aux_classifier.parameters() if p.requires_grad]
151 params_to_optimize.append({"params": params, "lr": args.lr * 10})
152 optimizer = torch.optim.SGD(
153 params_to_optimize,
154 lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
155
156 lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
157 optimizer,
158 lambda x: (1 - x / (len(data_loader) * args.epochs)) ** 0.9)
159
160 start_time = time.time()
161 for epoch in range(args.epochs):
162 if args.distributed:
163 train_sampler.set_epoch(epoch)
164 train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, args.print_freq)
165 confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)
166 print(confmat)
167 utils.save_on_master(
168 {
169 'model': model_without_ddp.state_dict(),
170 'optimizer': optimizer.state_dict(),
171 'epoch': epoch,
172 'args': args
173 },
174 os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
175
176 total_time = time.time() - start_time
177 total_time_str = str(datetime.timedelta(seconds=int(total_time)))
178 print('Training time {}'.format(total_time_str))
179
180
181 def parse_args():
182 import argparse
183 parser = argparse.ArgumentParser(description='PyTorch Segmentation Training')
184
185 parser.add_argument('--dataset', default='voc', help='dataset')
186 parser.add_argument('--model', default='fcn_resnet101', help='model')
187 parser.add_argument('--aux-loss', action='store_true', help='auxiliar loss')
188 parser.add_argument('--device', default='cuda', help='device')
189 parser.add_argument('-b', '--batch-size', default=8, type=int)
190 parser.add_argument('--epochs', default=30, type=int, metavar='N',
191 help='number of total epochs to run')
192
193 parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
194 help='number of data loading workers (default: 16)')
195 parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
196 parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
197 help='momentum')
198 parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
199 metavar='W', help='weight decay (default: 1e-4)',
200 dest='weight_decay')
201 parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
202 parser.add_argument('--output-dir', default='.', help='path where to save')
203 parser.add_argument('--resume', default='', help='resume from checkpoint')
204 parser.add_argument(
205 "--test-only",
206 dest="test_only",
207 help="Only test the model",
208 action="store_true",
209 )
210 parser.add_argument(
211 "--pretrained",
212 dest="pretrained",
213 help="Use pre-trained models from the modelzoo",
214 action="store_true",
215 )
216 # distributed training parameters
217 parser.add_argument('--world-size', default=1, type=int,
218 help='number of distributed processes')
219 parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
220
221 args = parser.parse_args()
222 return args
223
224
225 if __name__ == "__main__":
226 args = parse_args()
227 main(args)
228
[end of references/segmentation/train.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/references/segmentation/train.py b/references/segmentation/train.py
--- a/references/segmentation/train.py
+++ b/references/segmentation/train.py
@@ -128,10 +128,6 @@
if args.distributed:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
- if args.resume:
- checkpoint = torch.load(args.resume, map_location='cpu')
- model.load_state_dict(checkpoint['model'])
-
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
@@ -157,8 +153,15 @@
optimizer,
lambda x: (1 - x / (len(data_loader) * args.epochs)) ** 0.9)
+ if args.resume:
+ checkpoint = torch.load(args.resume, map_location='cpu')
+ model_without_ddp.load_state_dict(checkpoint['model'])
+ optimizer.load_state_dict(checkpoint['optimizer'])
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
+ args.start_epoch = checkpoint['epoch'] + 1
+
start_time = time.time()
- for epoch in range(args.epochs):
+ for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, args.print_freq)
@@ -168,6 +171,7 @@
{
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
+ 'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args
},
@@ -201,6 +205,8 @@
parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
parser.add_argument('--output-dir', default='.', help='path where to save')
parser.add_argument('--resume', default='', help='resume from checkpoint')
+ parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
+ help='start epoch')
parser.add_argument(
"--test-only",
dest="test_only",
| {"golden_diff": "diff --git a/references/segmentation/train.py b/references/segmentation/train.py\n--- a/references/segmentation/train.py\n+++ b/references/segmentation/train.py\n@@ -128,10 +128,6 @@\n if args.distributed:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n \n- if args.resume:\n- checkpoint = torch.load(args.resume, map_location='cpu')\n- model.load_state_dict(checkpoint['model'])\n-\n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n@@ -157,8 +153,15 @@\n optimizer,\n lambda x: (1 - x / (len(data_loader) * args.epochs)) ** 0.9)\n \n+ if args.resume:\n+ checkpoint = torch.load(args.resume, map_location='cpu')\n+ model_without_ddp.load_state_dict(checkpoint['model'])\n+ optimizer.load_state_dict(checkpoint['optimizer'])\n+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n+ args.start_epoch = checkpoint['epoch'] + 1\n+\n start_time = time.time()\n- for epoch in range(args.epochs):\n+ for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, args.print_freq)\n@@ -168,6 +171,7 @@\n {\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n+ 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n 'args': args\n },\n@@ -201,6 +205,8 @@\n parser.add_argument('--print-freq', default=10, type=int, help='print frequency')\n parser.add_argument('--output-dir', default='.', help='path where to save')\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n+ parser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n+ help='start epoch')\n parser.add_argument(\n \"--test-only\",\n dest=\"test_only\",\n", "issue": "Resume in the Segmentation example\n## \ud83d\udc1b Bug\r\n\r\nThe segmentation training [script](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py) doesn't seem to allow correct resuming. 
I think [lines 131-133](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py#L131) should be changed to:\r\n\r\n```\r\n start_epoch = 0\r\n if args.resume:\r\n checkpoint = torch.load(args.resume, map_location='cpu')\r\n model.load_state_dict(checkpoint['model'])\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\r\n start_epoch = checkpoint['epoch'] + 1\r\n```\r\n\r\nThen, [line 161](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py#L161) and [lines 167-173](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py#L167) should be changed respectively to:\r\n\r\n```\r\n for epoch in range(start_epoch, args.epochs):\r\n```\r\n\r\nand\r\n\r\n```\r\nutils.save_on_master(\r\n {\r\n 'model': model_without_ddp.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n 'lr_scheduler': lr_scheduler.state_dict(),\r\n 'epoch': epoch\r\n },\r\n```\r\n\r\nBesides, is there a good reason to set [`batch_size=1`](https://github.com/pytorch/vision/blob/7b60f4db9707d7afdbb87fd4e8ef6906ca014720/references/segmentation/train.py#L120) (instead of `args.batch_size` or even `2*args.batch_size`) for `data_loader_test`?\r\n\r\nThanks.\r\n\n", "before_files": [{"content": "import datetime\nimport os\nimport time\n\nimport torch\nimport torch.utils.data\nfrom torch import nn\nimport torchvision\n\nfrom coco_utils import get_coco\nimport transforms as T\nimport utils\n\n\ndef get_dataset(name, image_set, transform):\n def sbd(*args, **kwargs):\n return torchvision.datasets.SBDataset(*args, mode='segmentation', **kwargs)\n paths = {\n \"voc\": ('/datasets01/VOC/060817/', torchvision.datasets.VOCSegmentation, 21),\n \"voc_aug\": ('/datasets01/SBDD/072318/', sbd, 21),\n \"coco\": ('/datasets01/COCO/022719/', get_coco, 21)\n }\n p, ds_fn, num_classes = paths[name]\n\n ds = ds_fn(p, image_set=image_set, transforms=transform)\n return ds, num_classes\n\n\ndef get_transform(train):\n base_size = 520\n crop_size = 480\n\n min_size = int((0.5 if train else 1.0) * base_size)\n max_size = int((2.0 if train else 1.0) * base_size)\n transforms = []\n transforms.append(T.RandomResize(min_size, max_size))\n if train:\n transforms.append(T.RandomHorizontalFlip(0.5))\n transforms.append(T.RandomCrop(crop_size))\n transforms.append(T.ToTensor())\n transforms.append(T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]))\n\n return T.Compose(transforms)\n\n\ndef criterion(inputs, target):\n losses = {}\n for name, x in inputs.items():\n losses[name] = nn.functional.cross_entropy(x, target, ignore_index=255)\n\n if len(losses) == 1:\n return losses['out']\n\n return losses['out'] + 0.5 * losses['aux']\n\n\ndef evaluate(model, data_loader, device, num_classes):\n model.eval()\n confmat = utils.ConfusionMatrix(num_classes)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n with torch.no_grad():\n for image, target in metric_logger.log_every(data_loader, 100, header):\n image, target = image.to(device), target.to(device)\n output = model(image)\n output = output['out']\n\n confmat.update(target.flatten(), output.argmax(1).flatten())\n\n confmat.reduce_from_all_processes()\n\n return confmat\n\n\ndef train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, print_freq):\n model.train()\n metric_logger = utils.MetricLogger(delimiter=\" 
\")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))\n header = 'Epoch: [{}]'.format(epoch)\n for image, target in metric_logger.log_every(data_loader, print_freq, header):\n image, target = image.to(device), target.to(device)\n output = model(image)\n loss = criterion(output, target)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n lr_scheduler.step()\n\n metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0][\"lr\"])\n\n\ndef main(args):\n if args.output_dir:\n utils.mkdir(args.output_dir)\n\n utils.init_distributed_mode(args)\n print(args)\n\n device = torch.device(args.device)\n\n dataset, num_classes = get_dataset(args.dataset, \"train\", get_transform(train=True))\n dataset_test, _ = get_dataset(args.dataset, \"val\", get_transform(train=False))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)\n else:\n train_sampler = torch.utils.data.RandomSampler(dataset)\n test_sampler = torch.utils.data.SequentialSampler(dataset_test)\n\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=args.batch_size,\n sampler=train_sampler, num_workers=args.workers,\n collate_fn=utils.collate_fn, drop_last=True)\n\n data_loader_test = torch.utils.data.DataLoader(\n dataset_test, batch_size=1,\n sampler=test_sampler, num_workers=args.workers,\n collate_fn=utils.collate_fn)\n\n model = torchvision.models.segmentation.__dict__[args.model](num_classes=num_classes,\n aux_loss=args.aux_loss,\n pretrained=args.pretrained)\n model.to(device)\n if args.distributed:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n if args.resume:\n checkpoint = torch.load(args.resume, map_location='cpu')\n model.load_state_dict(checkpoint['model'])\n\n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n\n if args.test_only:\n confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)\n print(confmat)\n return\n\n params_to_optimize = [\n {\"params\": [p for p in model_without_ddp.backbone.parameters() if p.requires_grad]},\n {\"params\": [p for p in model_without_ddp.classifier.parameters() if p.requires_grad]},\n ]\n if args.aux_loss:\n params = [p for p in model_without_ddp.aux_classifier.parameters() if p.requires_grad]\n params_to_optimize.append({\"params\": params, \"lr\": args.lr * 10})\n optimizer = torch.optim.SGD(\n params_to_optimize,\n lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer,\n lambda x: (1 - x / (len(data_loader) * args.epochs)) ** 0.9)\n\n start_time = time.time()\n for epoch in range(args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, args.print_freq)\n confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)\n print(confmat)\n utils.save_on_master(\n {\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n 'args': args\n },\n os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\ndef parse_args():\n import argparse\n 
parser = argparse.ArgumentParser(description='PyTorch Segmentation Training')\n\n parser.add_argument('--dataset', default='voc', help='dataset')\n parser.add_argument('--model', default='fcn_resnet101', help='model')\n parser.add_argument('--aux-loss', action='store_true', help='auxiliar loss')\n parser.add_argument('--device', default='cuda', help='device')\n parser.add_argument('-b', '--batch-size', default=8, type=int)\n parser.add_argument('--epochs', default=30, type=int, metavar='N',\n help='number of total epochs to run')\n\n parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',\n help='number of data loading workers (default: 16)')\n parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')\n parser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\n parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\n parser.add_argument('--print-freq', default=10, type=int, help='print frequency')\n parser.add_argument('--output-dir', default='.', help='path where to save')\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument(\n \"--test-only\",\n dest=\"test_only\",\n help=\"Only test the model\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--pretrained\",\n dest=\"pretrained\",\n help=\"Use pre-trained models from the modelzoo\",\n action=\"store_true\",\n )\n # distributed training parameters\n parser.add_argument('--world-size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n", "path": "references/segmentation/train.py"}]} | 3,630 | 498 |
gh_patches_debug_2851 | rasdani/github-patches | git_diff | google__pytype-144 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Future-proof preconditions.py:_TOKEN_RE
3.7 adds a warning about possible future changes to re: https://bugs.python.org/issue30349
A future version of python will add nested sets, which allows nesting of sets using `[...]`. Escape the inner `[` in the re so it doesn't trigger a nested set.
Closes #140.
</issue>
<code>
[start of pytype/pytd/parse/preconditions.py]
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Preconditions for automatic argument checking."""
16
17 import re
18
19 from pytype import utils
20
21
22 class PreconditionError(ValueError):
23 pass
24
25
26 class _Precondition(object):
27 """Base class for preconditions."""
28
29 def check(self, value):
30 """Raise PreconditionError if value does not match condition."""
31 raise NotImplementedError
32
33 def allowed_types(self):
34 """Returns a set of types or typenames that are allowed."""
35 raise NotImplementedError
36
37
38 class _ClassNamePrecondition(_Precondition):
39 """Precondition that expects an instance of a specific class."""
40
41 def __init__(self, class_name):
42 super(_ClassNamePrecondition, self).__init__()
43 self._class_name = class_name
44
45 def check(self, value):
46 actual = type(value).__name__
47 if actual != self._class_name:
48 raise PreconditionError(
49 "actual=%s, expected=%s" % (actual, self._class_name))
50
51 def allowed_types(self):
52 return {self._class_name}
53
54
55 class _IsInstancePrecondition(_Precondition):
56 """Precondition that expects an instance of a class or subclass."""
57
58 def __init__(self, cls):
59 super(_IsInstancePrecondition, self).__init__()
60 self._cls = cls
61
62 def check(self, value):
63 if not isinstance(value, self._cls):
64 raise PreconditionError(
65 "actual=%s, expected_superclass=%s" % (
66 type(value).__name__, self._cls.__name__))
67
68 def allowed_types(self):
69 return {self._cls}
70
71
72 _REGISTERED_CLASSES = {}
73
74
75 def register(cls):
76 """Register a class object for use in {X} syntax."""
77 name = cls.__name__
78 assert name not in _REGISTERED_CLASSES
79 _REGISTERED_CLASSES[name] = _IsInstancePrecondition(cls)
80
81
82 class _TuplePrecondition(_Precondition):
83 """Precondition that expects a tuple."""
84
85 def __init__(self, element_condition):
86 super(_TuplePrecondition, self).__init__()
87 self._element_condition = element_condition
88
89 def check(self, value):
90 if not isinstance(value, tuple):
91 raise PreconditionError(
92 "actual=%s, expected=tuple" % type(value).__name__)
93 for v in value:
94 self._element_condition.check(v)
95
96 def allowed_types(self):
97 return self._element_condition.allowed_types()
98
99
100 class _OrPrecondition(_Precondition):
101 """Precondition that expects one of various choices to match."""
102
103 def __init__(self, choices):
104 super(_OrPrecondition, self).__init__()
105 self._choices = choices
106
107 def check(self, value):
108 errors = []
109 for c in self._choices:
110 try:
111 c.check(value)
112 return
113 except PreconditionError as e:
114 errors.append(e)
115 raise PreconditionError(
116 " or ".join("(%s)" % utils.message(e) for e in errors))
117
118 def allowed_types(self):
119 allowed = set()
120 for c in self._choices:
121 allowed |= c.allowed_types()
122 return allowed
123
124
125 class CallChecker(object):
126 """Class that performs argument checks against a collection of conditions."""
127
128 def __init__(self, condition_pairs):
129 """Create a checker given a sequence of (name, precondition) pairs."""
130 self._arg_sequence = tuple(condition_pairs)
131 self._arg_map = dict(self._arg_sequence)
132
133 def check(self, *args, **kwargs):
134 """Raise PreconditionError if the actual call is invalid."""
135 # This check is intended to be in addition to an actual call, so an
136 # incorrect number of args or undefined kwargs should be caught elsewhere.
137 for value, pair in zip(args, self._arg_sequence):
138 name, condition = pair
139 self._check_arg(condition, name, value)
140 for name, value in kwargs.items():
141 condition = self._arg_map.get(name)
142 self._check_arg(condition, name, value)
143
144 def _check_arg(self, condition, name, value):
145 if condition:
146 try:
147 condition.check(value)
148 except PreconditionError as e:
149 raise PreconditionError("argument=%s: %s." % (name, utils.message(e)))
150
151 def allowed_types(self):
152 """Determines the types and typenames allowed by calls to the checker.
153
154 Returns:
155 A set of types and/or typenames (strings). A typename matches
156 only that one class while a type matches any subclass of the type.
157 """
158 allowed = set()
159 for _, c in self._arg_sequence:
160 allowed |= c.allowed_types()
161 return allowed
162
163
164 # RE to match a single token. Leading whitepace is ignored.
165 _TOKEN_RE = re.compile(
166 r"\s*(?:(?P<literal>[[\]{}])|(?P<word>[a-zA-Z_]\w*))")
167
168 # Token codes (aside from literal characters)
169 _TOKEN_NAME = 1
170 _TOKEN_TUPLE = 2
171 _TOKEN_OR = 3
172
173 _RESERVED = {
174 "tuple": _TOKEN_TUPLE,
175 "or": _TOKEN_OR,
176 }
177
178
179 class _Parser(object):
180 """A parser for precondition specifications."""
181
182 def __init__(self, spec):
183 self._spec = spec.strip() # Must strip trailing whitespace.
184 self._pos = 0
185 self._pending_token = None
186
187 def parse(self):
188 """Parse the spec and return a precondition."""
189 cond = self._parse_or()
190 self._expect(None)
191 return cond
192
193 def _peek_token(self):
194 """Return the token code of the next token (do not consume token)."""
195 if self._pending_token is None:
196 self._pending_token = self._pop_token()
197 return self._pending_token[0]
198
199 def _pop_token(self):
200 """Consume the next token and return (token_code, token_val)."""
201 if self._pending_token is not None:
202 result = self._pending_token
203 self._pending_token = None
204 return result
205
206 if self._pos >= len(self._spec):
207 return None, None
208 m = _TOKEN_RE.match(self._spec, self._pos)
209 if not m:
210 raise ValueError("Syntax Error")
211 self._pos = m.end()
212 literal = m.group("literal")
213 if literal:
214 return literal, None
215 word = m.group("word")
216 t = _RESERVED.get(word)
217 if t:
218 return t, None
219 else:
220 return _TOKEN_NAME, word
221
222 def _expect(self, expected_code):
223 """Pop the next token, raise a ValueError if the code does not match."""
224 t, val = self._pop_token() # pylint: disable=unpacking-non-sequence
225 if t != expected_code:
226 raise ValueError("Syntax Error")
227 return val
228
229 def _parse_or(self):
230 """Parse one or more conditions separated by "or"."""
231 choices = [self._parse_one()]
232 while self._peek_token() == _TOKEN_OR:
233 self._pop_token()
234 choices.append(self._parse_one())
235 if len(choices) == 1:
236 return choices[0]
237 else:
238 return _OrPrecondition(choices)
239
240 def _parse_one(self):
241 """Parse a single condition (not including "or")."""
242 t, val = self._pop_token() # pylint: disable=unpacking-non-sequence
243 if t == _TOKEN_NAME:
244 return _ClassNamePrecondition(val if val != "None" else "NoneType")
245 elif t == "{":
246 name = self._expect(_TOKEN_NAME)
247 self._expect("}")
248 cond = _REGISTERED_CLASSES.get(name)
249 if cond is None:
250 raise ValueError(
251 "Class '%s' is not registered for preconditions." % name)
252 return cond
253 elif t == _TOKEN_TUPLE:
254 self._expect("[")
255 element = self._parse_or()
256 self._expect("]")
257 return _TuplePrecondition(element)
258 raise ValueError("Syntax Error")
259
260
261 def parse(spec):
262 """Return a _Precondition for the given string."""
263 return _Parser(spec).parse()
264
265
266 def parse_arg(arg_spec):
267 """Return (name, precondition) or (name, None) for given argument spec."""
268 name, _, spec = arg_spec.partition(":")
269 return name, parse(spec) if spec else None
270
[end of pytype/pytd/parse/preconditions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytype/pytd/parse/preconditions.py b/pytype/pytd/parse/preconditions.py
--- a/pytype/pytd/parse/preconditions.py
+++ b/pytype/pytd/parse/preconditions.py
@@ -163,7 +163,7 @@
# RE to match a single token. Leading whitepace is ignored.
_TOKEN_RE = re.compile(
- r"\s*(?:(?P<literal>[[\]{}])|(?P<word>[a-zA-Z_]\w*))")
+ r"\s*(?:(?P<literal>[\[\]{}])|(?P<word>[a-zA-Z_]\w*))")
# Token codes (aside from literal characters)
_TOKEN_NAME = 1
| {"golden_diff": "diff --git a/pytype/pytd/parse/preconditions.py b/pytype/pytd/parse/preconditions.py\n--- a/pytype/pytd/parse/preconditions.py\n+++ b/pytype/pytd/parse/preconditions.py\n@@ -163,7 +163,7 @@\n \n # RE to match a single token. Leading whitepace is ignored.\n _TOKEN_RE = re.compile(\n- r\"\\s*(?:(?P<literal>[[\\]{}])|(?P<word>[a-zA-Z_]\\w*))\")\n+ r\"\\s*(?:(?P<literal>[\\[\\]{}])|(?P<word>[a-zA-Z_]\\w*))\")\n \n # Token codes (aside from literal characters)\n _TOKEN_NAME = 1\n", "issue": "Future-proof preconditions.py:_TOKEN_RE\n3.7 adds a warning about possible future changes to re: https://bugs.python.org/issue30349\r\nA future version of python will add nested sets, which allows nesting of sets using `[...]`. Escape the inner `[` in the re so it doesn't trigger a nested set.\r\nCloses #140.\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Preconditions for automatic argument checking.\"\"\"\n\nimport re\n\nfrom pytype import utils\n\n\nclass PreconditionError(ValueError):\n pass\n\n\nclass _Precondition(object):\n \"\"\"Base class for preconditions.\"\"\"\n\n def check(self, value):\n \"\"\"Raise PreconditionError if value does not match condition.\"\"\"\n raise NotImplementedError\n\n def allowed_types(self):\n \"\"\"Returns a set of types or typenames that are allowed.\"\"\"\n raise NotImplementedError\n\n\nclass _ClassNamePrecondition(_Precondition):\n \"\"\"Precondition that expects an instance of a specific class.\"\"\"\n\n def __init__(self, class_name):\n super(_ClassNamePrecondition, self).__init__()\n self._class_name = class_name\n\n def check(self, value):\n actual = type(value).__name__\n if actual != self._class_name:\n raise PreconditionError(\n \"actual=%s, expected=%s\" % (actual, self._class_name))\n\n def allowed_types(self):\n return {self._class_name}\n\n\nclass _IsInstancePrecondition(_Precondition):\n \"\"\"Precondition that expects an instance of a class or subclass.\"\"\"\n\n def __init__(self, cls):\n super(_IsInstancePrecondition, self).__init__()\n self._cls = cls\n\n def check(self, value):\n if not isinstance(value, self._cls):\n raise PreconditionError(\n \"actual=%s, expected_superclass=%s\" % (\n type(value).__name__, self._cls.__name__))\n\n def allowed_types(self):\n return {self._cls}\n\n\n_REGISTERED_CLASSES = {}\n\n\ndef register(cls):\n \"\"\"Register a class object for use in {X} syntax.\"\"\"\n name = cls.__name__\n assert name not in _REGISTERED_CLASSES\n _REGISTERED_CLASSES[name] = _IsInstancePrecondition(cls)\n\n\nclass _TuplePrecondition(_Precondition):\n \"\"\"Precondition that expects a tuple.\"\"\"\n\n def __init__(self, element_condition):\n super(_TuplePrecondition, self).__init__()\n self._element_condition = element_condition\n\n def check(self, value):\n if not isinstance(value, tuple):\n raise PreconditionError(\n \"actual=%s, expected=tuple\" % type(value).__name__)\n for v in value:\n self._element_condition.check(v)\n\n def 
allowed_types(self):\n return self._element_condition.allowed_types()\n\n\nclass _OrPrecondition(_Precondition):\n \"\"\"Precondition that expects one of various choices to match.\"\"\"\n\n def __init__(self, choices):\n super(_OrPrecondition, self).__init__()\n self._choices = choices\n\n def check(self, value):\n errors = []\n for c in self._choices:\n try:\n c.check(value)\n return\n except PreconditionError as e:\n errors.append(e)\n raise PreconditionError(\n \" or \".join(\"(%s)\" % utils.message(e) for e in errors))\n\n def allowed_types(self):\n allowed = set()\n for c in self._choices:\n allowed |= c.allowed_types()\n return allowed\n\n\nclass CallChecker(object):\n \"\"\"Class that performs argument checks against a collection of conditions.\"\"\"\n\n def __init__(self, condition_pairs):\n \"\"\"Create a checker given a sequence of (name, precondition) pairs.\"\"\"\n self._arg_sequence = tuple(condition_pairs)\n self._arg_map = dict(self._arg_sequence)\n\n def check(self, *args, **kwargs):\n \"\"\"Raise PreconditionError if the actual call is invalid.\"\"\"\n # This check is intended to be in addition to an actual call, so an\n # incorrect number of args or undefined kwargs should be caught elsewhere.\n for value, pair in zip(args, self._arg_sequence):\n name, condition = pair\n self._check_arg(condition, name, value)\n for name, value in kwargs.items():\n condition = self._arg_map.get(name)\n self._check_arg(condition, name, value)\n\n def _check_arg(self, condition, name, value):\n if condition:\n try:\n condition.check(value)\n except PreconditionError as e:\n raise PreconditionError(\"argument=%s: %s.\" % (name, utils.message(e)))\n\n def allowed_types(self):\n \"\"\"Determines the types and typenames allowed by calls to the checker.\n\n Returns:\n A set of types and/or typenames (strings). A typename matches\n only that one class while a type matches any subclass of the type.\n \"\"\"\n allowed = set()\n for _, c in self._arg_sequence:\n allowed |= c.allowed_types()\n return allowed\n\n\n# RE to match a single token. 
Leading whitepace is ignored.\n_TOKEN_RE = re.compile(\n r\"\\s*(?:(?P<literal>[[\\]{}])|(?P<word>[a-zA-Z_]\\w*))\")\n\n# Token codes (aside from literal characters)\n_TOKEN_NAME = 1\n_TOKEN_TUPLE = 2\n_TOKEN_OR = 3\n\n_RESERVED = {\n \"tuple\": _TOKEN_TUPLE,\n \"or\": _TOKEN_OR,\n}\n\n\nclass _Parser(object):\n \"\"\"A parser for precondition specifications.\"\"\"\n\n def __init__(self, spec):\n self._spec = spec.strip() # Must strip trailing whitespace.\n self._pos = 0\n self._pending_token = None\n\n def parse(self):\n \"\"\"Parse the spec and return a precondition.\"\"\"\n cond = self._parse_or()\n self._expect(None)\n return cond\n\n def _peek_token(self):\n \"\"\"Return the token code of the next token (do not consume token).\"\"\"\n if self._pending_token is None:\n self._pending_token = self._pop_token()\n return self._pending_token[0]\n\n def _pop_token(self):\n \"\"\"Consume the next token and return (token_code, token_val).\"\"\"\n if self._pending_token is not None:\n result = self._pending_token\n self._pending_token = None\n return result\n\n if self._pos >= len(self._spec):\n return None, None\n m = _TOKEN_RE.match(self._spec, self._pos)\n if not m:\n raise ValueError(\"Syntax Error\")\n self._pos = m.end()\n literal = m.group(\"literal\")\n if literal:\n return literal, None\n word = m.group(\"word\")\n t = _RESERVED.get(word)\n if t:\n return t, None\n else:\n return _TOKEN_NAME, word\n\n def _expect(self, expected_code):\n \"\"\"Pop the next token, raise a ValueError if the code does not match.\"\"\"\n t, val = self._pop_token() # pylint: disable=unpacking-non-sequence\n if t != expected_code:\n raise ValueError(\"Syntax Error\")\n return val\n\n def _parse_or(self):\n \"\"\"Parse one or more conditions separated by \"or\".\"\"\"\n choices = [self._parse_one()]\n while self._peek_token() == _TOKEN_OR:\n self._pop_token()\n choices.append(self._parse_one())\n if len(choices) == 1:\n return choices[0]\n else:\n return _OrPrecondition(choices)\n\n def _parse_one(self):\n \"\"\"Parse a single condition (not including \"or\").\"\"\"\n t, val = self._pop_token() # pylint: disable=unpacking-non-sequence\n if t == _TOKEN_NAME:\n return _ClassNamePrecondition(val if val != \"None\" else \"NoneType\")\n elif t == \"{\":\n name = self._expect(_TOKEN_NAME)\n self._expect(\"}\")\n cond = _REGISTERED_CLASSES.get(name)\n if cond is None:\n raise ValueError(\n \"Class '%s' is not registered for preconditions.\" % name)\n return cond\n elif t == _TOKEN_TUPLE:\n self._expect(\"[\")\n element = self._parse_or()\n self._expect(\"]\")\n return _TuplePrecondition(element)\n raise ValueError(\"Syntax Error\")\n\n\ndef parse(spec):\n \"\"\"Return a _Precondition for the given string.\"\"\"\n return _Parser(spec).parse()\n\n\ndef parse_arg(arg_spec):\n \"\"\"Return (name, precondition) or (name, None) for given argument spec.\"\"\"\n name, _, spec = arg_spec.partition(\":\")\n return name, parse(spec) if spec else None\n", "path": "pytype/pytd/parse/preconditions.py"}]} | 3,272 | 166 |
gh_patches_debug_8696 | rasdani/github-patches | git_diff | easybuilders__easybuild-framework-757 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
santiy_check_commands doesn't work for ipython
</issue>
<code>
[start of easybuild/framework/extensioneasyblock.py]
1 ##
2 # Copyright 2013 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of the University of Ghent (http://ugent.be/hpc).
6 #
7 # http://github.com/hpcugent/easybuild
8 #
9 # EasyBuild is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation v2.
12 #
13 # EasyBuild is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
20 ##
21 """
22 EasyBuild support for building and installing extensions as actual extensions or as stand-alone modules,
23 implemented as an easyblock
24
25 @author: Kenneth Hoste (Ghent University)
26 """
27 import copy
28 import os
29
30 from easybuild.framework.easyblock import EasyBlock
31 from easybuild.framework.easyconfig import CUSTOM
32 from easybuild.framework.extension import Extension
33 from easybuild.tools.filetools import apply_patch, extract_file
34 from easybuild.tools.utilities import remove_unwanted_chars
35
36
37 class ExtensionEasyBlock(EasyBlock, Extension):
38 """
39 Install an extension as a separate module, or as an extension.
40
41 Deriving classes should implement the following functions:
42 * required EasyBlock functions:
43 - configure_step
44 - build_step
45 - install_step
46 * required Extension functions
47 - run
48 """
49
50 @staticmethod
51 def extra_options(extra_vars=None):
52 """Extra easyconfig parameters specific to ExtensionEasyBlock."""
53
54 # using [] as default value is a bad idea, so we handle it this way
55 if extra_vars is None:
56 extra_vars = []
57
58 extra_vars.extend([
59 ('options', [{}, "Dictionary with extension options.", CUSTOM]),
60 ])
61 return EasyBlock.extra_options(extra_vars)
62
63 def __init__(self, *args, **kwargs):
64 """Initialize either as EasyBlock or as Extension."""
65
66 self.is_extension = False
67
68 if isinstance(args[0], EasyBlock):
69 Extension.__init__(self, *args, **kwargs)
70 # name and version properties of EasyBlock are used, so make sure name and version are correct
71 self.cfg['name'] = self.ext.get('name', None)
72 self.cfg['version'] = self.ext.get('version', None)
73 self.builddir = self.master.builddir
74 self.installdir = self.master.installdir
75 self.is_extension = True
76 self.unpack_options = None
77 else:
78 EasyBlock.__init__(self, *args, **kwargs)
79 self.options = copy.deepcopy(self.cfg.get('options', {})) # we need this for Extension.sanity_check_step
80
81 self.ext_dir = None # dir where extension source was unpacked
82
83 def run(self, unpack_src=False):
84 """Common operations for extensions: unpacking sources, patching, ..."""
85
86 # unpack file if desired
87 if unpack_src:
88 targetdir = os.path.join(self.master.builddir, remove_unwanted_chars(self.name))
89 self.ext_dir = extract_file("%s" % self.src, targetdir, extra_options=self.unpack_options)
90
91 # patch if needed
92 if self.patches:
93 for patchfile in self.patches:
94 if not apply_patch(patchfile, self.ext_dir):
95 self.log.error("Applying patch %s failed" % patchfile)
96
97 def sanity_check_step(self, exts_filter=None, custom_paths=None, custom_commands=None):
98 """
99 Custom sanity check for extensions, whether installed as stand-alone module or not
100 """
101 if not self.cfg['exts_filter']:
102 self.cfg['exts_filter'] = exts_filter
103 self.log.debug("starting sanity check for extension with filter %s", self.cfg['exts_filter'])
104
105 if not self.is_extension:
106 # load fake module
107 fake_mod_data = self.load_fake_module(purge=True)
108
109 # perform sanity check
110 sanity_check_ok = Extension.sanity_check_step(self)
111
112 if not self.is_extension:
113 # unload fake module and clean up
114 self.clean_up_fake_module(fake_mod_data)
115
116 if custom_paths or custom_commands:
117 EasyBlock.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands,
118 extension=self.is_extension)
119
120 # pass or fail sanity check
121 if not sanity_check_ok:
122 msg = "Sanity check for %s failed: %s" % (self.name, '; '.join(self.sanity_check_fail_msgs))
123 if self.is_extension:
124 self.log.warning(msg)
125 else:
126 self.log.error(msg)
127 return False
128 else:
129 self.log.info("Sanity check for %s successful!" % self.name)
130 return True
131
132 def make_module_extra(self, extra=None):
133 """Add custom entries to module."""
134
135 txt = EasyBlock.make_module_extra(self)
136 if not extra is None:
137 txt += extra
138 return txt
139
[end of easybuild/framework/extensioneasyblock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/framework/extensioneasyblock.py b/easybuild/framework/extensioneasyblock.py
--- a/easybuild/framework/extensioneasyblock.py
+++ b/easybuild/framework/extensioneasyblock.py
@@ -113,7 +113,7 @@
# unload fake module and clean up
self.clean_up_fake_module(fake_mod_data)
- if custom_paths or custom_commands:
+ if custom_paths or self.cfg['sanity_check_paths'] or custom_commands or self.cfg['sanity_check_commands']:
EasyBlock.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands,
extension=self.is_extension)
| {"golden_diff": "diff --git a/easybuild/framework/extensioneasyblock.py b/easybuild/framework/extensioneasyblock.py\n--- a/easybuild/framework/extensioneasyblock.py\n+++ b/easybuild/framework/extensioneasyblock.py\n@@ -113,7 +113,7 @@\n # unload fake module and clean up\n self.clean_up_fake_module(fake_mod_data)\n \n- if custom_paths or custom_commands:\n+ if custom_paths or self.cfg['sanity_check_paths'] or custom_commands or self.cfg['sanity_check_commands']:\n EasyBlock.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands,\n extension=self.is_extension)\n", "issue": "santiy_check_commands doesn't work for ipython\n\n", "before_files": [{"content": "##\n# Copyright 2013 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of the University of Ghent (http://ugent.be/hpc).\n#\n# http://github.com/hpcugent/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing extensions as actual extensions or as stand-alone modules,\nimplemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport copy\nimport os\n\nfrom easybuild.framework.easyblock import EasyBlock\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.framework.extension import Extension\nfrom easybuild.tools.filetools import apply_patch, extract_file\nfrom easybuild.tools.utilities import remove_unwanted_chars\n\n\nclass ExtensionEasyBlock(EasyBlock, Extension):\n \"\"\"\n Install an extension as a separate module, or as an extension.\n\n Deriving classes should implement the following functions:\n * required EasyBlock functions:\n - configure_step\n - build_step\n - install_step\n * required Extension functions\n - run\n \"\"\"\n\n @staticmethod\n def extra_options(extra_vars=None):\n \"\"\"Extra easyconfig parameters specific to ExtensionEasyBlock.\"\"\"\n\n # using [] as default value is a bad idea, so we handle it this way\n if extra_vars is None:\n extra_vars = []\n\n extra_vars.extend([\n ('options', [{}, \"Dictionary with extension options.\", CUSTOM]),\n ])\n return EasyBlock.extra_options(extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize either as EasyBlock or as Extension.\"\"\"\n\n self.is_extension = False\n\n if isinstance(args[0], EasyBlock):\n Extension.__init__(self, *args, **kwargs)\n # name and version properties of EasyBlock are used, so make sure name and version are correct\n self.cfg['name'] = self.ext.get('name', None)\n self.cfg['version'] = self.ext.get('version', None)\n self.builddir = self.master.builddir\n self.installdir = self.master.installdir\n self.is_extension = True\n self.unpack_options = None\n else:\n EasyBlock.__init__(self, *args, **kwargs)\n self.options = copy.deepcopy(self.cfg.get('options', {})) # we need this for Extension.sanity_check_step\n\n self.ext_dir = None # dir where extension source was unpacked\n\n def run(self, unpack_src=False):\n \"\"\"Common operations for extensions: unpacking 
sources, patching, ...\"\"\"\n\n # unpack file if desired\n if unpack_src:\n targetdir = os.path.join(self.master.builddir, remove_unwanted_chars(self.name))\n self.ext_dir = extract_file(\"%s\" % self.src, targetdir, extra_options=self.unpack_options)\n\n # patch if needed\n if self.patches:\n for patchfile in self.patches:\n if not apply_patch(patchfile, self.ext_dir):\n self.log.error(\"Applying patch %s failed\" % patchfile)\n\n def sanity_check_step(self, exts_filter=None, custom_paths=None, custom_commands=None):\n \"\"\"\n Custom sanity check for extensions, whether installed as stand-alone module or not\n \"\"\"\n if not self.cfg['exts_filter']:\n self.cfg['exts_filter'] = exts_filter\n self.log.debug(\"starting sanity check for extension with filter %s\", self.cfg['exts_filter'])\n\n if not self.is_extension:\n # load fake module\n fake_mod_data = self.load_fake_module(purge=True)\n\n # perform sanity check\n sanity_check_ok = Extension.sanity_check_step(self)\n\n if not self.is_extension:\n # unload fake module and clean up\n self.clean_up_fake_module(fake_mod_data)\n\n if custom_paths or custom_commands:\n EasyBlock.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands,\n extension=self.is_extension)\n\n # pass or fail sanity check\n if not sanity_check_ok:\n msg = \"Sanity check for %s failed: %s\" % (self.name, '; '.join(self.sanity_check_fail_msgs))\n if self.is_extension:\n self.log.warning(msg)\n else:\n self.log.error(msg)\n return False\n else:\n self.log.info(\"Sanity check for %s successful!\" % self.name)\n return True\n\n def make_module_extra(self, extra=None):\n \"\"\"Add custom entries to module.\"\"\"\n\n txt = EasyBlock.make_module_extra(self)\n if not extra is None:\n txt += extra\n return txt\n", "path": "easybuild/framework/extensioneasyblock.py"}]} | 1,981 | 144 |
gh_patches_debug_3275 | rasdani/github-patches | git_diff | apache__tvm-6502 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[TOPI] Typo in operator key
https://github.com/apache/incubator-tvm/blob/bdfefbb03f5aab96ee677ee28a166dd6ab5dbf3f/python/tvm/topi/bifrost/dense.py#L26
"biforst" should be "bifrost". This bug makes the op totally unavailable in Relay.
I can fix this bug if expected, but I don't know how to add a proper test.
</issue>
<code>
[start of python/tvm/topi/bifrost/dense.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 # pylint: disable=invalid-name,unused-variable
18 """dense schedule on ARM Mali Biforst GPU"""
19 from tvm import te
20 from tvm import autotvm
21
22 from .. import nn
23 from ..util import traverse_inline
24
25
26 @autotvm.register_topi_compute("dense.biforst")
27 def dense(_, data, weight, bias=None, out_dtype=None):
28 """Dense operator on Biforst"""
29 return nn.dense(data, weight, bias, out_dtype)
30
31
32 @autotvm.register_topi_schedule("dense.bifrost")
33 def schedule_dense(cfg, outs):
34 """Schedule for dense operator.
35
36 Parameters
37 ----------
38 cfg: ConfigEntity
39 The config entity for this template
40 outs: Array of Tensor
41 The computation graph description of dense
42 in the format of an array of tensors.
43
44 Returns
45 -------
46 s: Schedule
47 The computation schedule for dense.
48 """
49 outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
50 s = te.create_schedule([x.op for x in outs])
51
52 def _callback(op):
53 if op.tag == "dense":
54 vec_size = [1, 2, 4, 8, 16]
55 max_unroll = 32
56
57 dense_out = op.output(0)
58 output = outs[0]
59
60 y, x = s[output].op.axis
61 c = s[dense_out].op.reduce_axis[0]
62
63 ##### space definition begin #####
64 cfg.define_split("tile_y", y, num_outputs=3)
65 cfg.define_split("tile_x", x, num_outputs=3)
66 cfg.define_split("c_unroll", c, num_outputs=2, max_factor=64)
67
68 # fallback support
69 if cfg.is_fallback:
70 ref_log = autotvm.tophub.load_reference_log("mali", "rk3399", "dense.bifrost")
71 cfg.fallback_with_reference_log(ref_log)
72 ##### space definition end #####
73
74 if dense_out.op in s.outputs:
75 dense_out = s.cache_write(output, "local")
76
77 by, ty, yi = cfg["tile_y"].apply(s, output, y)
78 bx, tx, xi = cfg["tile_x"].apply(s, output, x)
79
80 s[output].bind(by, te.thread_axis("blockIdx.y"))
81 s[output].bind(bx, te.thread_axis("blockIdx.x"))
82 s[output].bind(ty, te.thread_axis("threadIdx.y"))
83 s[output].bind(tx, te.thread_axis("threadIdx.x"))
84
85 if cfg["tile_y"].size[-1] < max_unroll:
86 s[output].unroll(yi)
87 if cfg["tile_x"].size[-1] in vec_size:
88 s[output].vectorize(xi)
89 s[dense_out].compute_at(s[output], tx)
90
91 k = s[dense_out].op.reduce_axis[0]
92 y, x = s[dense_out].op.axis
93 k, k_unroll = cfg["c_unroll"].apply(s, dense_out, k)
94 s[dense_out].reorder(k, k_unroll, y, x)
95 s[dense_out].unroll(k_unroll)
96 if cfg["tile_y"].size[-1] < max_unroll:
97 s[dense_out].unroll(y)
98 if cfg["tile_x"].size[-1] in vec_size:
99 s[dense_out].vectorize(x)
100
101 traverse_inline(s, outs[0].op, _callback)
102 return s
103
104
105 def fuse_and_bind(s, tensor, axis=None, num_thread=None):
106 """ fuse all the axis and bind to GPU threads """
107 axis = axis or s[tensor].op.axis
108 fused = s[tensor].fuse(*axis)
109 bx, tx = s[tensor].split(fused, num_thread)
110 s[tensor].bind(bx, te.thread_axis("blockIdx.x"))
111 s[tensor].bind(tx, te.thread_axis("threadIdx.x"))
112 return bx, tx
113
[end of python/tvm/topi/bifrost/dense.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/tvm/topi/bifrost/dense.py b/python/tvm/topi/bifrost/dense.py
--- a/python/tvm/topi/bifrost/dense.py
+++ b/python/tvm/topi/bifrost/dense.py
@@ -23,7 +23,7 @@
 from ..util import traverse_inline
-@autotvm.register_topi_compute("dense.biforst")
+@autotvm.register_topi_compute("dense.bifrost")
 def dense(_, data, weight, bias=None, out_dtype=None):
"""Dense operator on Biforst"""
return nn.dense(data, weight, bias, out_dtype)
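
The one-character rename above matters because TOPI pairs compute and schedule functions by their registration key; with the compute registered as "dense.biforst" and the schedule as "dense.bifrost", the pair presumably can never be resolved for the Mali Bifrost target, which is why the issue describes the op as unavailable in Relay. A minimal sketch of the corrected, matching registrations (scheduling details omitted; this mirrors the file above and assumes a TVM build where `tvm.topi.nn` is importable):

```python
from tvm import autotvm, te
from tvm.topi import nn

@autotvm.register_topi_compute("dense.bifrost")   # key must match the schedule below
def dense(_, data, weight, bias=None, out_dtype=None):
    return nn.dense(data, weight, bias, out_dtype)

@autotvm.register_topi_schedule("dense.bifrost")  # same key, so the pair can be looked up
def schedule_dense(cfg, outs):
    # Real scheduling (tiling, unrolling, vectorization) is omitted for brevity;
    # see the full implementation in dense.py above.
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    return te.create_schedule([x.op for x in outs])
```
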
| {"golden_diff": "diff --git a/python/tvm/topi/bifrost/dense.py b/python/tvm/topi/bifrost/dense.py\n--- a/python/tvm/topi/bifrost/dense.py\n+++ b/python/tvm/topi/bifrost/dense.py\n@@ -23,7 +23,7 @@\n from ..util import traverse_inline\n \n \[email protected]_topi_compute(\"dense.biforst\")\[email protected]_topi_compute(\"dense.bifrost\")\n def dense(_, data, weight, bias=None, out_dtype=None):\n \"\"\"Dense operator on Biforst\"\"\"\n return nn.dense(data, weight, bias, out_dtype)\n", "issue": "[TOPI] Typo in operator key\nhttps://github.com/apache/incubator-tvm/blob/bdfefbb03f5aab96ee677ee28a166dd6ab5dbf3f/python/tvm/topi/bifrost/dense.py#L26\r\n\r\n\"biforst\" should be \"bifrost\". This bug makes the op totally unavailable in Relay.\r\n\r\nI can fix this bug if expected, but I don't know how to add a proper test.\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name,unused-variable\n\"\"\"dense schedule on ARM Mali Biforst GPU\"\"\"\nfrom tvm import te\nfrom tvm import autotvm\n\nfrom .. 
import nn\nfrom ..util import traverse_inline\n\n\[email protected]_topi_compute(\"dense.biforst\")\ndef dense(_, data, weight, bias=None, out_dtype=None):\n \"\"\"Dense operator on Biforst\"\"\"\n return nn.dense(data, weight, bias, out_dtype)\n\n\[email protected]_topi_schedule(\"dense.bifrost\")\ndef schedule_dense(cfg, outs):\n \"\"\"Schedule for dense operator.\n\n Parameters\n ----------\n cfg: ConfigEntity\n The config entity for this template\n outs: Array of Tensor\n The computation graph description of dense\n in the format of an array of tensors.\n\n Returns\n -------\n s: Schedule\n The computation schedule for dense.\n \"\"\"\n outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs\n s = te.create_schedule([x.op for x in outs])\n\n def _callback(op):\n if op.tag == \"dense\":\n vec_size = [1, 2, 4, 8, 16]\n max_unroll = 32\n\n dense_out = op.output(0)\n output = outs[0]\n\n y, x = s[output].op.axis\n c = s[dense_out].op.reduce_axis[0]\n\n ##### space definition begin #####\n cfg.define_split(\"tile_y\", y, num_outputs=3)\n cfg.define_split(\"tile_x\", x, num_outputs=3)\n cfg.define_split(\"c_unroll\", c, num_outputs=2, max_factor=64)\n\n # fallback support\n if cfg.is_fallback:\n ref_log = autotvm.tophub.load_reference_log(\"mali\", \"rk3399\", \"dense.bifrost\")\n cfg.fallback_with_reference_log(ref_log)\n ##### space definition end #####\n\n if dense_out.op in s.outputs:\n dense_out = s.cache_write(output, \"local\")\n\n by, ty, yi = cfg[\"tile_y\"].apply(s, output, y)\n bx, tx, xi = cfg[\"tile_x\"].apply(s, output, x)\n\n s[output].bind(by, te.thread_axis(\"blockIdx.y\"))\n s[output].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(ty, te.thread_axis(\"threadIdx.y\"))\n s[output].bind(tx, te.thread_axis(\"threadIdx.x\"))\n\n if cfg[\"tile_y\"].size[-1] < max_unroll:\n s[output].unroll(yi)\n if cfg[\"tile_x\"].size[-1] in vec_size:\n s[output].vectorize(xi)\n s[dense_out].compute_at(s[output], tx)\n\n k = s[dense_out].op.reduce_axis[0]\n y, x = s[dense_out].op.axis\n k, k_unroll = cfg[\"c_unroll\"].apply(s, dense_out, k)\n s[dense_out].reorder(k, k_unroll, y, x)\n s[dense_out].unroll(k_unroll)\n if cfg[\"tile_y\"].size[-1] < max_unroll:\n s[dense_out].unroll(y)\n if cfg[\"tile_x\"].size[-1] in vec_size:\n s[dense_out].vectorize(x)\n\n traverse_inline(s, outs[0].op, _callback)\n return s\n\n\ndef fuse_and_bind(s, tensor, axis=None, num_thread=None):\n \"\"\" fuse all the axis and bind to GPU threads \"\"\"\n axis = axis or s[tensor].op.axis\n fused = s[tensor].fuse(*axis)\n bx, tx = s[tensor].split(fused, num_thread)\n s[tensor].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[tensor].bind(tx, te.thread_axis(\"threadIdx.x\"))\n return bx, tx\n", "path": "python/tvm/topi/bifrost/dense.py"}]} | 1,947 | 143 |
gh_patches_debug_20922 | rasdani/github-patches | git_diff | pystiche__pystiche-228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MD5 hash error
Hi, I get this error when running the script given in the beginner example.
`FileExistsError: bird1.jpg with a different MD5 hash already exists in /root/.cache/pystiche. If you want to overwrite it, set overwrite=True.`
</issue>
<code>
[start of pystiche/demo.py]
1 import logging
2 import sys
3
4 from pystiche.data import (
5 DownloadableImage,
6 DownloadableImageCollection,
7 PixabayLicense,
8 PublicDomainLicense,
9 )
10 from pystiche.optim import OptimLogger
11
12 __all__ = ["demo_images", "demo_logger"]
13
14
15 def demo_images():
16 return DownloadableImageCollection(
17 {
18 "dancing": DownloadableImage(
19 "https://pytorch.org/tutorials/_static/img/neural-style/dancing.jpg",
20 md5="0a2df538901452d639170a2ed89815a4",
21 ),
22 "picasso": DownloadableImage(
23 "https://pytorch.org/tutorials/_static/img/neural-style/picasso.jpg",
24 md5="d1d60fc3f9d0b22d2d826c47934a37ea",
25 ),
26 "bird1": DownloadableImage(
27 "https://cdn.pixabay.com/photo/2016/01/14/11/26/bird-1139734_960_720.jpg",
28 file="bird1.jpg",
29 author="gholmz0",
30 date="09.03.2013",
31 license=PixabayLicense(),
32 md5="d42444d3cd0afa47f07066cd083d6cea",
33 ),
34 "paint": DownloadableImage(
35 "https://cdn.pixabay.com/photo/2017/07/03/20/17/abstract-2468874_960_720.jpg",
36 file="paint.jpg",
37 author="garageband",
38 date="03.07.2017",
39 license=PixabayLicense(),
40 md5="a991e222806ef49d34b172a67cf97d91",
41 ),
42 "bird2": DownloadableImage(
43 "https://cdn.pixabay.com/photo/2013/03/12/17/53/bird-92956_960_720.jpg",
44 file="bird2.jpg",
45 author="12019",
46 date="09.04.2012",
47 license=PixabayLicense(),
48 md5="dda3e1d0f93f783de823b4f91129d44e",
49 ),
50 "mosaic": DownloadableImage(
51 "https://upload.wikimedia.org/wikipedia/commons/2/23/Mosaic_ducks_Massimo.jpg",
52 file="mosaic.jpg",
53 author="Marie-Lan Nguyen",
54 date="2006",
55 license=PublicDomainLicense(),
56 md5="5b60cd1724395f7a0c21dc6dd006f8ae",
57 ),
58 }
59 )
60
61
62 def demo_logger():
63 logger = logging.getLogger("demo_logger")
64 logger.setLevel(logging.INFO)
65
66 sh = logging.StreamHandler(sys.stdout)
67 sh.setLevel(logging.INFO)
68 logger.addHandler(sh)
69
70 return OptimLogger(logger)
71
[end of pystiche/demo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pystiche/demo.py b/pystiche/demo.py
--- a/pystiche/demo.py
+++ b/pystiche/demo.py
@@ -29,7 +29,7 @@
author="gholmz0",
date="09.03.2013",
license=PixabayLicense(),
- md5="d42444d3cd0afa47f07066cd083d6cea",
+ md5="36e5fef725943a5d1d22b5048095da86",
),
"paint": DownloadableImage(
"https://cdn.pixabay.com/photo/2017/07/03/20/17/abstract-2468874_960_720.jpg",
@@ -45,7 +45,7 @@
author="12019",
date="09.04.2012",
license=PixabayLicense(),
- md5="dda3e1d0f93f783de823b4f91129d44e",
+ md5="8c5b608bd579d931e2cfe7229840fe9b",
),
"mosaic": DownloadableImage(
"https://upload.wikimedia.org/wikipedia/commons/2/23/Mosaic_ducks_Massimo.jpg",
| {"golden_diff": "diff --git a/pystiche/demo.py b/pystiche/demo.py\n--- a/pystiche/demo.py\n+++ b/pystiche/demo.py\n@@ -29,7 +29,7 @@\n author=\"gholmz0\",\n date=\"09.03.2013\",\n license=PixabayLicense(),\n- md5=\"d42444d3cd0afa47f07066cd083d6cea\",\n+ md5=\"36e5fef725943a5d1d22b5048095da86\",\n ),\n \"paint\": DownloadableImage(\n \"https://cdn.pixabay.com/photo/2017/07/03/20/17/abstract-2468874_960_720.jpg\",\n@@ -45,7 +45,7 @@\n author=\"12019\",\n date=\"09.04.2012\",\n license=PixabayLicense(),\n- md5=\"dda3e1d0f93f783de823b4f91129d44e\",\n+ md5=\"8c5b608bd579d931e2cfe7229840fe9b\",\n ),\n \"mosaic\": DownloadableImage(\n \"https://upload.wikimedia.org/wikipedia/commons/2/23/Mosaic_ducks_Massimo.jpg\",\n", "issue": "MD5 hash error\nHi, I get this error when running the script given in the example for beginner.\r\n\r\n`FileExistsError: bird1.jpg with a different MD5 hash already exists in /root/.cache/pystiche. If you want to overwrite it, set overwrite=True.`\n", "before_files": [{"content": "import logging\nimport sys\n\nfrom pystiche.data import (\n DownloadableImage,\n DownloadableImageCollection,\n PixabayLicense,\n PublicDomainLicense,\n)\nfrom pystiche.optim import OptimLogger\n\n__all__ = [\"demo_images\", \"demo_logger\"]\n\n\ndef demo_images():\n return DownloadableImageCollection(\n {\n \"dancing\": DownloadableImage(\n \"https://pytorch.org/tutorials/_static/img/neural-style/dancing.jpg\",\n md5=\"0a2df538901452d639170a2ed89815a4\",\n ),\n \"picasso\": DownloadableImage(\n \"https://pytorch.org/tutorials/_static/img/neural-style/picasso.jpg\",\n md5=\"d1d60fc3f9d0b22d2d826c47934a37ea\",\n ),\n \"bird1\": DownloadableImage(\n \"https://cdn.pixabay.com/photo/2016/01/14/11/26/bird-1139734_960_720.jpg\",\n file=\"bird1.jpg\",\n author=\"gholmz0\",\n date=\"09.03.2013\",\n license=PixabayLicense(),\n md5=\"d42444d3cd0afa47f07066cd083d6cea\",\n ),\n \"paint\": DownloadableImage(\n \"https://cdn.pixabay.com/photo/2017/07/03/20/17/abstract-2468874_960_720.jpg\",\n file=\"paint.jpg\",\n author=\"garageband\",\n date=\"03.07.2017\",\n license=PixabayLicense(),\n md5=\"a991e222806ef49d34b172a67cf97d91\",\n ),\n \"bird2\": DownloadableImage(\n \"https://cdn.pixabay.com/photo/2013/03/12/17/53/bird-92956_960_720.jpg\",\n file=\"bird2.jpg\",\n author=\"12019\",\n date=\"09.04.2012\",\n license=PixabayLicense(),\n md5=\"dda3e1d0f93f783de823b4f91129d44e\",\n ),\n \"mosaic\": DownloadableImage(\n \"https://upload.wikimedia.org/wikipedia/commons/2/23/Mosaic_ducks_Massimo.jpg\",\n file=\"mosaic.jpg\",\n author=\"Marie-Lan Nguyen\",\n date=\"2006\",\n license=PublicDomainLicense(),\n md5=\"5b60cd1724395f7a0c21dc6dd006f8ae\",\n ),\n }\n )\n\n\ndef demo_logger():\n logger = logging.getLogger(\"demo_logger\")\n logger.setLevel(logging.INFO)\n\n sh = logging.StreamHandler(sys.stdout)\n sh.setLevel(logging.INFO)\n logger.addHandler(sh)\n\n return OptimLogger(logger)\n", "path": "pystiche/demo.py"}]} | 1,460 | 345 |
gh_patches_debug_28865 | rasdani/github-patches | git_diff | bokeh__bokeh-2790 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create example of using Hover tool to display custom images
It would be nice to show how someone can use the hover tool to display custom images via a URL/URI upon hovering over a region of interest. It would allow users to embed an additional dimension into plots.
</issue>
<code>
[start of sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py]
1 from bokeh.plotting import figure, output_file, show, ColumnDataSource
2 from bokeh.models import HoverTool
3
4 output_file("toolbar.html")
5
6 source = ColumnDataSource(
7 data=dict(
8 x=[1, 2, 3, 4, 5],
9 y=[2, 5, 8, 2, 7],
10 desc=['A', 'b', 'C', 'd', 'E'],
11 )
12 )
13
14 hover = HoverTool(
15 tooltips="""
16 <div>
17 <span style="font-size: 17px; font-weight: bold;">@desc</span>
18 <span style="font-size: 15px; color: #966;">[$index]</span>
19 </div>
20 <div>
21 <span style="font-size: 15px;">Location</span>
22 <span style="font-size: 10px; color: #696;">($x, $y)</span>
23 </div>
24 """
25 )
26
27 p = figure(plot_width=400, plot_height=400, tools=[hover],
28 title="Mouse over the dots")
29
30 p.circle('x', 'y', size=20, source=source)
31
32 show(p)
33
34
35
[end of sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py b/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py
--- a/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py
+++ b/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py
@@ -8,18 +8,34 @@
x=[1, 2, 3, 4, 5],
y=[2, 5, 8, 2, 7],
desc=['A', 'b', 'C', 'd', 'E'],
+ imgs = [
+ 'http://bokeh.pydata.org/static/snake.jpg',
+ 'http://bokeh.pydata.org/static/snake2.png',
+ 'http://bokeh.pydata.org/static/snake3D.png',
+ 'http://bokeh.pydata.org/static/snake4_TheRevenge.png',
+ 'http://bokeh.pydata.org/static/snakebite.jpg'
+ ]
)
)
hover = HoverTool(
tooltips="""
<div>
- <span style="font-size: 17px; font-weight: bold;">@desc</span>
- <span style="font-size: 15px; color: #966;">[$index]</span>
- </div>
- <div>
- <span style="font-size: 15px;">Location</span>
- <span style="font-size: 10px; color: #696;">($x, $y)</span>
+ <div>
+ <img
+ src="@imgs" height="42" alt="@imgs" width="42"
+ style="float: left; margin: 0px 15px 15px 0px;"
+ border="2"
+ ></img>
+ </div>
+ <div>
+ <span style="font-size: 17px; font-weight: bold;">@desc</span>
+ <span style="font-size: 15px; color: #966;">[$index]</span>
+ </div>
+ <div>
+ <span style="font-size: 15px;">Location</span>
+ <span style="font-size: 10px; color: #696;">($x, $y)</span>
+ </div>
</div>
"""
)
@@ -30,5 +46,3 @@
p.circle('x', 'y', size=20, source=source)
show(p)
-
-
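
The patch works because `HoverTool` accepts an arbitrary HTML template for `tooltips`, with `@column` references substituted per hovered glyph, so pointing an `<img>` tag's `src` at a column of image URLs is all that is needed. A stripped-down sketch of the same idea, using the same (older) `bokeh.plotting` API as the example above; the image URLs are placeholders:

```python
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.models import HoverTool

output_file("hover_images.html")

source = ColumnDataSource(data=dict(
    x=[1, 2, 3],
    y=[4, 6, 5],
    imgs=[  # any reachable image URLs work here
        'http://example.com/a.png',
        'http://example.com/b.png',
        'http://example.com/c.png',
    ],
))

hover = HoverTool(tooltips="""
    <div>
        <img src="@imgs" height="42" width="42"
             style="float: left; margin: 0px 10px 10px 0px;" border="2"></img>
        <span style="font-size: 15px;">($x, $y)</span>
    </div>
""")

p = figure(plot_width=300, plot_height=300, tools=[hover], title="Mouse over the dots")
p.circle('x', 'y', size=20, source=source)
show(p)
```
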
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py b/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py\n--- a/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py\n+++ b/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py\n@@ -8,18 +8,34 @@\n x=[1, 2, 3, 4, 5],\n y=[2, 5, 8, 2, 7],\n desc=['A', 'b', 'C', 'd', 'E'],\n+ imgs = [\n+ 'http://bokeh.pydata.org/static/snake.jpg',\n+ 'http://bokeh.pydata.org/static/snake2.png',\n+ 'http://bokeh.pydata.org/static/snake3D.png',\n+ 'http://bokeh.pydata.org/static/snake4_TheRevenge.png',\n+ 'http://bokeh.pydata.org/static/snakebite.jpg'\n+ ]\n )\n )\n \n hover = HoverTool(\n tooltips=\"\"\"\n <div>\n- <span style=\"font-size: 17px; font-weight: bold;\">@desc</span>\n- <span style=\"font-size: 15px; color: #966;\">[$index]</span>\n- </div>\n- <div>\n- <span style=\"font-size: 15px;\">Location</span>\n- <span style=\"font-size: 10px; color: #696;\">($x, $y)</span>\n+ <div>\n+ <img\n+ src=\"@imgs\" height=\"42\" alt=\"@imgs\" width=\"42\"\n+ style=\"float: left; margin: 0px 15px 15px 0px;\"\n+ border=\"2\"\n+ ></img>\n+ </div>\n+ <div>\n+ <span style=\"font-size: 17px; font-weight: bold;\">@desc</span>\n+ <span style=\"font-size: 15px; color: #966;\">[$index]</span>\n+ </div>\n+ <div>\n+ <span style=\"font-size: 15px;\">Location</span>\n+ <span style=\"font-size: 10px; color: #696;\">($x, $y)</span>\n+ </div>\n </div>\n \"\"\"\n )\n@@ -30,5 +46,3 @@\n p.circle('x', 'y', size=20, source=source)\n \n show(p)\n-\n-\n", "issue": "Create example of using Hover tool to display custom images\nIt would be nice to show how someone can use the hovertool to display custom images using URL/URI upon hovering over a region of interest. It would allow users to embed an additional dimension into plots.\n\n", "before_files": [{"content": "from bokeh.plotting import figure, output_file, show, ColumnDataSource\nfrom bokeh.models import HoverTool\n\noutput_file(\"toolbar.html\")\n\nsource = ColumnDataSource(\n data=dict(\n x=[1, 2, 3, 4, 5],\n y=[2, 5, 8, 2, 7],\n desc=['A', 'b', 'C', 'd', 'E'],\n )\n )\n\nhover = HoverTool(\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 17px; font-weight: bold;\">@desc</span>\n <span style=\"font-size: 15px; color: #966;\">[$index]</span>\n </div>\n <div>\n <span style=\"font-size: 15px;\">Location</span>\n <span style=\"font-size: 10px; color: #696;\">($x, $y)</span>\n </div>\n \"\"\"\n )\n\np = figure(plot_width=400, plot_height=400, tools=[hover],\n title=\"Mouse over the dots\")\n\np.circle('x', 'y', size=20, source=source)\n\nshow(p)\n\n \n", "path": "sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py"}]} | 938 | 575 |
gh_patches_debug_25146 | rasdani/github-patches | git_diff | qtile__qtile-472 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CPU usage leak
Running qtile master. Over a timespan of about 24 hours, qtile's CPU usage rises from trivially low to nearly 100%. In the worst stages, qtile is non-responsive to keyboard shortcuts, forcing me to kill it externally.
</issue>
<code>
[start of libqtile/widget/battery.py]
1 import cairo
2 import os
3 from libqtile import bar
4 import base
5
6 BAT_DIR = '/sys/class/power_supply'
7 CHARGED = 'Full'
8 CHARGING = 'Charging'
9 DISCHARGING = 'Discharging'
10 UNKNOWN = 'Unknown'
11
12 BATTERY_INFO_FILES = {
13 'energy_now_file': ['energy_now', 'charge_now'],
14 'energy_full_file': ['energy_full', 'charge_full'],
15 'power_now_file': ['power_now', 'current_now'],
16 'status_file': ['status'],
17 }
18
19
20 def default_icon_path():
21 # default icons are in libqtile/resources/battery-icons
22 root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])
23 return os.path.join(root, 'resources', 'battery-icons')
24
25
26 class _Battery(base._TextBox):
27 ''' Base battery class '''
28
29 filenames = {}
30
31 defaults = [
32 ('battery_name', 'BAT0', 'ACPI name of a battery, usually BAT0'),
33 (
34 'status_file',
35 'status',
36 'Name of status file in'
37 ' /sys/class/power_supply/battery_name'
38 ),
39 (
40 'energy_now_file',
41 None,
42 'Name of file with the '
43 'current energy in /sys/class/power_supply/battery_name'
44 ),
45 (
46 'energy_full_file',
47 None,
48 'Name of file with the maximum'
49 ' energy in /sys/class/power_supply/battery_name'
50 ),
51 (
52 'power_now_file',
53 None,
54 'Name of file with the current'
55 ' power draw in /sys/class/power_supply/battery_name'
56 ),
57 ('update_delay', 1, 'The delay in seconds between updates'),
58 ]
59
60 def __init__(self, **config):
61 base._TextBox.__init__(self, "BAT", bar.CALCULATED, **config)
62 self.add_defaults(_Battery.defaults)
63
64 def _load_file(self, name):
65 try:
66 path = os.path.join(BAT_DIR, self.battery_name, name)
67 with open(path, 'r') as f:
68 return f.read().strip()
69 except IOError:
70 if name == 'current_now':
71 return 0
72 return False
73 except Exception:
74 self.log.exception("Failed to get %s" % name)
75
76 def _get_param(self, name):
77 if name in self.filenames:
78 return self._load_file(self.filenames[name])
79 else:
80 # Don't have the file name cached, figure it out
81 file_list = BATTERY_INFO_FILES.get(name, [])
82 if getattr(self, name, None):
83 # If a file is manually specified, check it first
84 file_list.insert(0, getattr(self, name))
85
86 # Iterate over the possibilities, and return the first valid value
87 for file in file_list:
88 value = self._load_file(file)
89 if not (value in (False, None)):
90 self.filenames[name] = file
91 return value
92
93 # If we made it this far, we don't have a valid file. Just return None.
94 return None
95
96 def _get_info(self):
97 try:
98 info = {
99 'stat': self._get_param('status_file'),
100 'now': float(self._get_param('energy_now_file')),
101 'full': float(self._get_param('energy_full_file')),
102 'power': float(self._get_param('power_now_file')),
103 }
104 except TypeError:
105 return False
106 return info
107
108
109 class Battery(_Battery):
110 """
111 A simple but flexible text-based battery widget.
112 """
113 defaults = [
114 ('low_foreground', 'FF0000', 'font color when battery is low'),
115 (
116 'format',
117 '{char} {percent:2.0%} {hour:d}:{min:02d}',
118 'Display format'
119 ),
120 ('charge_char', '^', 'Character to indicate the battery is charging'),
121 (
122 'discharge_char',
123 'V',
124 'Character to indicate the battery'
125 ' is discharging'
126 ),
127 (
128 'low_percentage',
129 0.10,
130 "0 < x < 1 at which to indicate battery is low with low_foreground"
131 ),
132 ('hide_threshold', None, 'Hide the text when there is enough energy'),
133 ]
134
135 def __init__(self, **config):
136 _Battery.__init__(self, **config)
137 self.add_defaults(Battery.defaults)
138 self.timeout_add(self.update_delay, self.update)
139 self.update()
140
141 def _get_text(self):
142 info = self._get_info()
143 if info is False:
144 return 'Error'
145
146 # Set the charging character
147 try:
148 # hide the text when it's higher than threshold, but still
149 # display `full` when the battery is fully charged.
150 if self.hide_threshold and \
151 info['now'] / info['full'] * 100.0 >= \
152 self.hide_threshold and \
153 info['stat'] != CHARGED:
154 return ''
155 elif info['stat'] == DISCHARGING:
156 char = self.discharge_char
157 time = info['now'] / info['power']
158 elif info['stat'] == CHARGING:
159 char = self.charge_char
160 time = (info['full'] - info['now']) / info['power']
161 else:
162 return 'Full'
163 except ZeroDivisionError:
164 time = -1
165
166 # Calculate the battery percentage and time left
167 if time >= 0:
168 hour = int(time)
169 min = int(time * 60) % 60
170 else:
171 hour = -1
172 min = -1
173 percent = info['now'] / info['full']
174 if info['stat'] == DISCHARGING and percent < self.low_percentage:
175 self.layout.colour = self.low_foreground
176 else:
177 self.layout.colour = self.foreground
178
179 return self.format.format(
180 char=char,
181 percent=percent,
182 hour=hour,
183 min=min
184 )
185
186 def update(self):
187 if self.configured:
188 ntext = self._get_text()
189 if ntext != self.text:
190 self.text = ntext
191 self.bar.draw()
192 return True
193
194
195 class BatteryIcon(_Battery):
196 ''' Battery life indicator widget '''
197
198 defaults = [
199 ('theme_path', default_icon_path(), 'Path of the icons'),
200 ('custom_icons', {}, 'dict containing key->filename icon map'),
201 ]
202
203 def __init__(self, **config):
204 _Battery.__init__(self, **config)
205 self.add_defaults(BatteryIcon.defaults)
206
207 if self.theme_path:
208 self.width_type = bar.STATIC
209 self.width = 0
210 self.surfaces = {}
211 self.current_icon = 'battery-missing'
212 self.icons = dict([(x, '{0}.png'.format(x)) for x in (
213 'battery-missing',
214 'battery-caution',
215 'battery-low',
216 'battery-good',
217 'battery-full',
218 'battery-caution-charging',
219 'battery-low-charging',
220 'battery-good-charging',
221 'battery-full-charging',
222 'battery-full-charged',
223 )])
224 self.icons.update(self.custom_icons)
225 self.timeout_add(self.update_delay, self.update)
226
227 def _configure(self, qtile, bar):
228 base._TextBox._configure(self, qtile, bar)
229 self.setup_images()
230
231 def _get_icon_key(self):
232 key = 'battery'
233 info = self._get_info()
234 if info is False or not info.get('full'):
235 key += '-missing'
236 else:
237 percent = info['now'] / info['full']
238 if percent < .2:
239 key += '-caution'
240 elif percent < .4:
241 key += '-low'
242 elif percent < .8:
243 key += '-good'
244 else:
245 key += '-full'
246
247 if info['stat'] == CHARGING:
248 key += '-charging'
249 elif info['stat'] == CHARGED:
250 key += '-charged'
251 return key
252
253 def update(self):
254 if self.configured:
255 icon = self._get_icon_key()
256 if icon != self.current_icon:
257 self.current_icon = icon
258 self.draw()
259 return True
260
261 def draw(self):
262 if self.theme_path:
263 self.drawer.clear(self.background or self.bar.background)
264 self.drawer.ctx.set_source(self.surfaces[self.current_icon])
265 self.drawer.ctx.paint()
266 self.drawer.draw(self.offset, self.width)
267 else:
268 self.text = self.current_icon[8:]
269 base._TextBox.draw(self)
270
271 def setup_images(self):
272 for key, name in self.icons.iteritems():
273 try:
274 path = os.path.join(self.theme_path, name)
275 img = cairo.ImageSurface.create_from_png(path)
276 except cairo.Error:
277 self.theme_path = None
278 self.qtile.log.warning('Battery Icon switching to text mode')
279 return
280 input_width = img.get_width()
281 input_height = img.get_height()
282
283 sp = input_height / float(self.bar.height - 1)
284
285 width = input_width / sp
286 if width > self.width:
287 self.width = int(width) + self.actual_padding * 2
288
289 imgpat = cairo.SurfacePattern(img)
290
291 scaler = cairo.Matrix()
292
293 scaler.scale(sp, sp)
294 scaler.translate(self.actual_padding * -1, 0)
295 imgpat.set_matrix(scaler)
296
297 imgpat.set_filter(cairo.FILTER_BEST)
298 self.surfaces[key] = imgpat
299
[end of libqtile/widget/battery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/battery.py b/libqtile/widget/battery.py
--- a/libqtile/widget/battery.py
+++ b/libqtile/widget/battery.py
@@ -74,11 +74,14 @@
self.log.exception("Failed to get %s" % name)
def _get_param(self, name):
- if name in self.filenames:
+ if name in self.filenames and self.filenames[name]:
return self._load_file(self.filenames[name])
- else:
+ elif name not in self.filenames:
# Don't have the file name cached, figure it out
- file_list = BATTERY_INFO_FILES.get(name, [])
+
+ # Don't modify the global list! Copy with [:]
+ file_list = BATTERY_INFO_FILES.get(name, [])[:]
+
if getattr(self, name, None):
# If a file is manually specified, check it first
file_list.insert(0, getattr(self, name))
@@ -90,7 +93,10 @@
self.filenames[name] = file
return value
- # If we made it this far, we don't have a valid file. Just return None.
+ # If we made it this far, we don't have a valid file.
+ # Set it to None to avoid trying the next time.
+ self.filenames[name] = None
+
return None
def _get_info(self):
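
The `[:]` copy is the heart of this fix. `BATTERY_INFO_FILES.get(name, [])` returns the module-level list itself, so whenever no readable sysfs entry is ever found (and the result is therefore never cached), every widget update calls `insert(0, ...)` on that shared list; it grows without bound and each poll re-probes an ever longer list of candidate files, which plausibly explains the CPU usage creeping up over a day. Caching the `None` result stops the repeated probing as well. A self-contained sketch of the aliasing mistake — the names are illustrative, not qtile's real data:

```python
CANDIDATES = {'power_now_file': ['power_now', 'current_now']}

def probe(name, override=None, copy=False):
    options = CANDIDATES.get(name, [])
    if copy:
        options = options[:]          # work on a private copy (the fix)
    if override:
        options.insert(0, override)   # without the copy, this mutates CANDIDATES!
    return options

for _ in range(3):
    probe('power_now_file', override='my_custom_file')

print(CANDIDATES['power_now_file'])
# ['my_custom_file', 'my_custom_file', 'my_custom_file', 'power_now', 'current_now']
```
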
| {"golden_diff": "diff --git a/libqtile/widget/battery.py b/libqtile/widget/battery.py\n--- a/libqtile/widget/battery.py\n+++ b/libqtile/widget/battery.py\n@@ -74,11 +74,14 @@\n self.log.exception(\"Failed to get %s\" % name)\n \n def _get_param(self, name):\n- if name in self.filenames:\n+ if name in self.filenames and self.filenames[name]:\n return self._load_file(self.filenames[name])\n- else:\n+ elif name not in self.filenames:\n # Don't have the file name cached, figure it out\n- file_list = BATTERY_INFO_FILES.get(name, [])\n+\n+ # Don't modify the global list! Copy with [:]\n+ file_list = BATTERY_INFO_FILES.get(name, [])[:]\n+\n if getattr(self, name, None):\n # If a file is manually specified, check it first\n file_list.insert(0, getattr(self, name))\n@@ -90,7 +93,10 @@\n self.filenames[name] = file\n return value\n \n- # If we made it this far, we don't have a valid file. Just return None.\n+ # If we made it this far, we don't have a valid file.\n+ # Set it to None to avoid trying the next time.\n+ self.filenames[name] = None\n+\n return None\n \n def _get_info(self):\n", "issue": "CPU usage leak\nRunning qtile master. Over a timespan of about 24 hours, qtile's CPU usage rises from trivially low to nearly 100%. In the worst stages, qtile is non-responsive to keyboard shortcuts, forcing me to kill it externally.\n\n", "before_files": [{"content": "import cairo\nimport os\nfrom libqtile import bar\nimport base\n\nBAT_DIR = '/sys/class/power_supply'\nCHARGED = 'Full'\nCHARGING = 'Charging'\nDISCHARGING = 'Discharging'\nUNKNOWN = 'Unknown'\n\nBATTERY_INFO_FILES = {\n 'energy_now_file': ['energy_now', 'charge_now'],\n 'energy_full_file': ['energy_full', 'charge_full'],\n 'power_now_file': ['power_now', 'current_now'],\n 'status_file': ['status'],\n}\n\n\ndef default_icon_path():\n # default icons are in libqtile/resources/battery-icons\n root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])\n return os.path.join(root, 'resources', 'battery-icons')\n\n\nclass _Battery(base._TextBox):\n ''' Base battery class '''\n\n filenames = {}\n\n defaults = [\n ('battery_name', 'BAT0', 'ACPI name of a battery, usually BAT0'),\n (\n 'status_file',\n 'status',\n 'Name of status file in'\n ' /sys/class/power_supply/battery_name'\n ),\n (\n 'energy_now_file',\n None,\n 'Name of file with the '\n 'current energy in /sys/class/power_supply/battery_name'\n ),\n (\n 'energy_full_file',\n None,\n 'Name of file with the maximum'\n ' energy in /sys/class/power_supply/battery_name'\n ),\n (\n 'power_now_file',\n None,\n 'Name of file with the current'\n ' power draw in /sys/class/power_supply/battery_name'\n ),\n ('update_delay', 1, 'The delay in seconds between updates'),\n ]\n\n def __init__(self, **config):\n base._TextBox.__init__(self, \"BAT\", bar.CALCULATED, **config)\n self.add_defaults(_Battery.defaults)\n\n def _load_file(self, name):\n try:\n path = os.path.join(BAT_DIR, self.battery_name, name)\n with open(path, 'r') as f:\n return f.read().strip()\n except IOError:\n if name == 'current_now':\n return 0\n return False\n except Exception:\n self.log.exception(\"Failed to get %s\" % name)\n\n def _get_param(self, name):\n if name in self.filenames:\n return self._load_file(self.filenames[name])\n else:\n # Don't have the file name cached, figure it out\n file_list = BATTERY_INFO_FILES.get(name, [])\n if getattr(self, name, None):\n # If a file is manually specified, check it first\n file_list.insert(0, getattr(self, name))\n\n # Iterate over the possibilities, and return the first valid 
value\n for file in file_list:\n value = self._load_file(file)\n if not (value in (False, None)):\n self.filenames[name] = file\n return value\n\n # If we made it this far, we don't have a valid file. Just return None.\n return None\n\n def _get_info(self):\n try:\n info = {\n 'stat': self._get_param('status_file'),\n 'now': float(self._get_param('energy_now_file')),\n 'full': float(self._get_param('energy_full_file')),\n 'power': float(self._get_param('power_now_file')),\n }\n except TypeError:\n return False\n return info\n\n\nclass Battery(_Battery):\n \"\"\"\n A simple but flexible text-based battery widget.\n \"\"\"\n defaults = [\n ('low_foreground', 'FF0000', 'font color when battery is low'),\n (\n 'format',\n '{char} {percent:2.0%} {hour:d}:{min:02d}',\n 'Display format'\n ),\n ('charge_char', '^', 'Character to indicate the battery is charging'),\n (\n 'discharge_char',\n 'V',\n 'Character to indicate the battery'\n ' is discharging'\n ),\n (\n 'low_percentage',\n 0.10,\n \"0 < x < 1 at which to indicate battery is low with low_foreground\"\n ),\n ('hide_threshold', None, 'Hide the text when there is enough energy'),\n ]\n\n def __init__(self, **config):\n _Battery.__init__(self, **config)\n self.add_defaults(Battery.defaults)\n self.timeout_add(self.update_delay, self.update)\n self.update()\n\n def _get_text(self):\n info = self._get_info()\n if info is False:\n return 'Error'\n\n # Set the charging character\n try:\n # hide the text when it's higher than threshold, but still\n # display `full` when the battery is fully charged.\n if self.hide_threshold and \\\n info['now'] / info['full'] * 100.0 >= \\\n self.hide_threshold and \\\n info['stat'] != CHARGED:\n return ''\n elif info['stat'] == DISCHARGING:\n char = self.discharge_char\n time = info['now'] / info['power']\n elif info['stat'] == CHARGING:\n char = self.charge_char\n time = (info['full'] - info['now']) / info['power']\n else:\n return 'Full'\n except ZeroDivisionError:\n time = -1\n\n # Calculate the battery percentage and time left\n if time >= 0:\n hour = int(time)\n min = int(time * 60) % 60\n else:\n hour = -1\n min = -1\n percent = info['now'] / info['full']\n if info['stat'] == DISCHARGING and percent < self.low_percentage:\n self.layout.colour = self.low_foreground\n else:\n self.layout.colour = self.foreground\n\n return self.format.format(\n char=char,\n percent=percent,\n hour=hour,\n min=min\n )\n\n def update(self):\n if self.configured:\n ntext = self._get_text()\n if ntext != self.text:\n self.text = ntext\n self.bar.draw()\n return True\n\n\nclass BatteryIcon(_Battery):\n ''' Battery life indicator widget '''\n\n defaults = [\n ('theme_path', default_icon_path(), 'Path of the icons'),\n ('custom_icons', {}, 'dict containing key->filename icon map'),\n ]\n\n def __init__(self, **config):\n _Battery.__init__(self, **config)\n self.add_defaults(BatteryIcon.defaults)\n\n if self.theme_path:\n self.width_type = bar.STATIC\n self.width = 0\n self.surfaces = {}\n self.current_icon = 'battery-missing'\n self.icons = dict([(x, '{0}.png'.format(x)) for x in (\n 'battery-missing',\n 'battery-caution',\n 'battery-low',\n 'battery-good',\n 'battery-full',\n 'battery-caution-charging',\n 'battery-low-charging',\n 'battery-good-charging',\n 'battery-full-charging',\n 'battery-full-charged',\n )])\n self.icons.update(self.custom_icons)\n self.timeout_add(self.update_delay, self.update)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.setup_images()\n\n def 
_get_icon_key(self):\n key = 'battery'\n info = self._get_info()\n if info is False or not info.get('full'):\n key += '-missing'\n else:\n percent = info['now'] / info['full']\n if percent < .2:\n key += '-caution'\n elif percent < .4:\n key += '-low'\n elif percent < .8:\n key += '-good'\n else:\n key += '-full'\n\n if info['stat'] == CHARGING:\n key += '-charging'\n elif info['stat'] == CHARGED:\n key += '-charged'\n return key\n\n def update(self):\n if self.configured:\n icon = self._get_icon_key()\n if icon != self.current_icon:\n self.current_icon = icon\n self.draw()\n return True\n\n def draw(self):\n if self.theme_path:\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.set_source(self.surfaces[self.current_icon])\n self.drawer.ctx.paint()\n self.drawer.draw(self.offset, self.width)\n else:\n self.text = self.current_icon[8:]\n base._TextBox.draw(self)\n\n def setup_images(self):\n for key, name in self.icons.iteritems():\n try:\n path = os.path.join(self.theme_path, name)\n img = cairo.ImageSurface.create_from_png(path)\n except cairo.Error:\n self.theme_path = None\n self.qtile.log.warning('Battery Icon switching to text mode')\n return\n input_width = img.get_width()\n input_height = img.get_height()\n\n sp = input_height / float(self.bar.height - 1)\n\n width = input_width / sp\n if width > self.width:\n self.width = int(width) + self.actual_padding * 2\n\n imgpat = cairo.SurfacePattern(img)\n\n scaler = cairo.Matrix()\n\n scaler.scale(sp, sp)\n scaler.translate(self.actual_padding * -1, 0)\n imgpat.set_matrix(scaler)\n\n imgpat.set_filter(cairo.FILTER_BEST)\n self.surfaces[key] = imgpat\n", "path": "libqtile/widget/battery.py"}]} | 3,468 | 319 |
gh_patches_debug_11629 | rasdani/github-patches | git_diff | beeware__toga-193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ProgressBar doesn't appear in a Box [Core] [Cocoa]
Example code: https://gist.github.com/Dayof/528f9dc38f4178dbc25db6bab553e19a
When a progress bar is added inside a box (below the label is the progress bar):

</issue>
<code>
[start of src/cocoa/toga_cocoa/widgets/progressbar.py]
1 from toga.interface import ProgressBar as ProgressBarInterface
2
3 from ..libs import *
4 from .base import WidgetMixin
5
6
7 class ProgressBar(ProgressBarInterface, WidgetMixin):
8 def __init__(self, id=None, style=None, max=None, value=None):
9 super().__init__(id=id, style=style, max=max, value=value)
10 self._create()
11
12 def create(self):
13 self._impl = NSProgressIndicator.new()
14 self._impl.setStyle_(NSProgressIndicatorBarStyle)
15 self._impl.setDisplayedWhenStopped_(True)
16
17 # Add the layout constraints
18 self._add_constraints()
19
20 def _set_value(self, value):
21 if value is not None:
22 self._impl.setDoubleValue_(value)
23
24 def start(self):
25 if self._impl and not self._running:
26 self._impl.startAnimation_(self._impl)
27 self._running = True
28
29 def stop(self):
30 if self._impl and self._running:
31 self._impl.stopAnimation_(self._impl)
32 self._running = False
33
34 def _set_max(self, value):
35 if value:
36 self._impl.setIndeterminate_(False)
37 self._impl.setMaxValue_(value)
38 else:
39 self._impl.setIndeterminate_(True)
40
[end of src/cocoa/toga_cocoa/widgets/progressbar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cocoa/toga_cocoa/widgets/progressbar.py b/src/cocoa/toga_cocoa/widgets/progressbar.py
--- a/src/cocoa/toga_cocoa/widgets/progressbar.py
+++ b/src/cocoa/toga_cocoa/widgets/progressbar.py
@@ -16,6 +16,7 @@
# Add the layout constraints
self._add_constraints()
+ self.rehint()
def _set_value(self, value):
if value is not None:
@@ -37,3 +38,9 @@
self._impl.setMaxValue_(value)
else:
self._impl.setIndeterminate_(True)
+
+ def rehint(self):
+ self.style.hint(
+ height=self._impl.fittingSize().height,
+ width=self._impl.fittingSize().width
+ )
| {"golden_diff": "diff --git a/src/cocoa/toga_cocoa/widgets/progressbar.py b/src/cocoa/toga_cocoa/widgets/progressbar.py\n--- a/src/cocoa/toga_cocoa/widgets/progressbar.py\n+++ b/src/cocoa/toga_cocoa/widgets/progressbar.py\n@@ -16,6 +16,7 @@\n \n # Add the layout constraints\n self._add_constraints()\n+ self.rehint()\n \n def _set_value(self, value):\n if value is not None:\n@@ -37,3 +38,9 @@\n self._impl.setMaxValue_(value)\n else:\n self._impl.setIndeterminate_(True)\n+\n+ def rehint(self):\n+ self.style.hint(\n+ height=self._impl.fittingSize().height,\n+ width=self._impl.fittingSize().width\n+ )\n", "issue": "ProgressBar doesn't appears in a Box [Core] [Cocoa]\nExample code: https://gist.github.com/Dayof/528f9dc38f4178dbc25db6bab553e19a\r\n\r\nWhen a progress bar is add inside of a box (bellow the label is the progress bar):\r\n\r\n\r\n\n", "before_files": [{"content": "from toga.interface import ProgressBar as ProgressBarInterface\n\nfrom ..libs import *\nfrom .base import WidgetMixin\n\n\nclass ProgressBar(ProgressBarInterface, WidgetMixin):\n def __init__(self, id=None, style=None, max=None, value=None):\n super().__init__(id=id, style=style, max=max, value=value)\n self._create()\n\n def create(self):\n self._impl = NSProgressIndicator.new()\n self._impl.setStyle_(NSProgressIndicatorBarStyle)\n self._impl.setDisplayedWhenStopped_(True)\n\n # Add the layout constraints\n self._add_constraints()\n\n def _set_value(self, value):\n if value is not None:\n self._impl.setDoubleValue_(value)\n\n def start(self):\n if self._impl and not self._running:\n self._impl.startAnimation_(self._impl)\n self._running = True\n\n def stop(self):\n if self._impl and self._running:\n self._impl.stopAnimation_(self._impl)\n self._running = False\n\n def _set_max(self, value):\n if value:\n self._impl.setIndeterminate_(False)\n self._impl.setMaxValue_(value)\n else:\n self._impl.setIndeterminate_(True)\n", "path": "src/cocoa/toga_cocoa/widgets/progressbar.py"}]} | 1,050 | 181 |
gh_patches_debug_40338 | rasdani/github-patches | git_diff | fossasia__open-event-server-4176 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix 'auth required' for GET /speakers
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
**Current behavior:**
<!-- Describe how the bug manifests. -->
**Expected behavior:**
<!-- Describe what the behavior would be without the bug. -->
**Steps to reproduce:**
<!-- If you are able to illustrate the bug or feature request with an example, please provide steps to reproduce -->
**Related code:**
```
insert any relevant code here else remove this section
```
**Other information:**
<!-- List any other information that is relevant to your issue. Stack traces, related issues, suggestions on how to fix, Stack Overflow links, forum links, etc. -->
**System information:**
<!-- Add information about the system you're facing this bug on. If you think this is irrelevant or if it's a UI bug or a feature request, please remove this section -->
```
Your operating system
```
```
output of `python --version`
```
</issue>
<code>
[start of app/api/speakers.py]
1 from marshmallow_jsonapi import fields
2 from marshmallow_jsonapi.flask import Schema, Relationship
3 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
4 from flask_rest_jsonapi.exceptions import ObjectNotFound
5
6 from app.api.helpers.utilities import dasherize
7 from app.api.helpers.permissions import jwt_required
8 from app.models import db
9 from app.models.speaker import Speaker
10 from app.models.session import Session
11 from app.models.user import User
12 from app.models.event import Event
13 from app.api.helpers.db import safe_query
14 from app.api.bootstrap import api
15 from app.api.helpers.utilities import require_relationship
16 from app.api.helpers.permission_manager import has_access
17
18
19 class SpeakerSchema(Schema):
20 """
21 Speaker Schema based on Speaker Model
22 """
23
24 class Meta:
25 """
26 Meta class for speaker schema
27 """
28 type_ = 'speaker'
29 self_view = 'v1.speaker_detail'
30 self_view_kwargs = {'id': '<id>'}
31 inflect = dasherize
32
33 id = fields.Str(dump_only=True)
34 name = fields.Str(required=True)
35 email = fields.Str(required=True)
36 photo_url = fields.Url(allow_none=True)
37 thumbnail_image_url = fields.Url(allow_none=True)
38 small_image_url = fields.Url(allow_none=True)
39 icon_image_url = fields.Url(allow_none=True)
40 short_biography = fields.Str(allow_none=True)
41 long_biography = fields.Str(allow_none=True)
42 speaking_experience = fields.Str(allow_none=True)
43 mobile = fields.Str(allow_none=True)
44 website = fields.Url(allow_none=True)
45 twitter = fields.Url(allow_none=True)
46 facebook = fields.Url(allow_none=True)
47 github = fields.Url(allow_none=True)
48 linkedin = fields.Url(allow_none=True)
49 organisation = fields.Str(allow_none=True)
50 is_featured = fields.Boolean(default=False)
51 position = fields.Str(allow_none=True)
52 country = fields.Str(allow_none=True)
53 city = fields.Str(allow_none=True)
54 gender = fields.Str(allow_none=True)
55 heard_from = fields.Str(allow_none=True)
56 sponsorship_required = fields.Str(allow_none=True)
57 event = Relationship(attribute='event',
58 self_view='v1.speaker_event',
59 self_view_kwargs={'id': '<id>'},
60 related_view='v1.event_detail',
61 related_view_kwargs={'speaker_id': '<id>'},
62 schema='EventSchema',
63 type_='event')
64 user = Relationship(attribute='user',
65 self_view='v1.speaker_user',
66 self_view_kwargs={'id': '<id>'},
67 related_view='v1.user_detail',
68 related_view_kwargs={'speaker_id': '<id>'},
69 schema='UserSchema',
70 type_='user')
71 sessions = Relationship(attribute='sessions',
72 self_view='v1.speaker_session',
73 self_view_kwargs={'id': '<id>'},
74 related_view='v1.session_list',
75 related_view_kwargs={'speaker_id': '<id>'},
76 schema='SessionSchema',
77 many=True,
78 type_='session')
79
80
81 class SpeakerListPost(ResourceList):
82 """
83 List and create speakers
84 """
85
86 def before_post(self, args, kwargs, data):
87 """
88 method to add user_id to view_kwargs before post
89 :param args:
90 :param kwargs:
91 :param data:
92 :return:
93 """
94 require_relationship(['event', 'user'], data)
95
96 if not has_access('is_coorganizer', event_id=data['event']):
97 event = safe_query(self, Event, 'id', data['event'], 'event_id')
98 if event.state == "draft":
99 raise ObjectNotFound({'parameter': 'event_id'},
100 "Event: {} not found".format(data['event_id']))
101
102 if 'sessions' in data:
103 session_ids = data['sessions']
104 for session_id in session_ids:
105 if not has_access('is_session_self_submitted', session_id=session_id):
106 raise ObjectNotFound({'parameter': 'session_id'},
107 "Session: {} not found".format(session_id))
108
109 schema = SpeakerSchema
110 methods = ['POST', ]
111 data_layer = {'session': db.session,
112 'model': Speaker
113 }
114
115
116 class SpeakerList(ResourceList):
117 """
118 List speakers based on different params from view_kwargs
119 """
120
121 def query(self, view_kwargs):
122 """
123 query method for speakers list class
124 :param view_kwargs:
125 :return:
126 """
127 query_ = self.session.query(Speaker)
128 if view_kwargs.get('event_identifier'):
129 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
130 view_kwargs['event_id'] = event.id
131 if view_kwargs.get('event_id'):
132 event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
133 query_ = query_.join(Event).filter(Event.id == event.id)
134 if not has_access('is_coorganizer', event_id=event.id):
135 query_ = query_.filter(Event.state == "published")
136
137 if view_kwargs.get('user_id'):
138 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
139 query_ = query_.join(User).filter(User.id == user.id)
140
141 if view_kwargs.get('session_id'):
142 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
143 # session-speaker :: many-to-many relationship
144 query_ = Speaker.query.filter(Speaker.sessions.any(id=session.id))
145 if not has_access('is_coorganizer', event_id=session.event_id):
146 if not has_access('is_session_self_submitted', session_id=session.id):
147 query_ = query_.filter(Session.state == "approved" or Session.state == "accepted")
148
149 return query_
150
151 view_kwargs = True
152 schema = SpeakerSchema
153 methods = ['GET', ]
154 data_layer = {'session': db.session,
155 'model': Speaker,
156 'methods': {
157 'query': query,
158 }}
159
160
161 class SpeakerDetail(ResourceDetail):
162 """
163 Speakers Detail by id
164 """
165 decorators = (api.has_permission('is_coorganizer_or_user_itself', methods="PATCH,DELETE", fetch="event_id",
166 fetch_as="event_id", model=Speaker, check=lambda a: a.get('id') is not None),)
167 schema = SpeakerSchema
168 data_layer = {'session': db.session,
169 'model': Speaker}
170
171
172 class SpeakerRelationshipRequired(ResourceRelationship):
173 """
174 Speaker Relationship class
175 """
176 decorators = (jwt_required,)
177 methods = ['GET', 'PATCH']
178 schema = SpeakerSchema
179 data_layer = {'session': db.session,
180 'model': Speaker}
181
182
183 class SpeakerRelationshipOptional(ResourceRelationship):
184 """
185 Speaker Relationship class
186 """
187 decorators = (jwt_required,)
188 schema = SpeakerSchema
189 data_layer = {'session': db.session,
190 'model': Speaker}
191
[end of app/api/speakers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/speakers.py b/app/api/speakers.py
--- a/app/api/speakers.py
+++ b/app/api/speakers.py
@@ -2,6 +2,7 @@
from marshmallow_jsonapi.flask import Schema, Relationship
from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
from flask_rest_jsonapi.exceptions import ObjectNotFound
+from flask import request
from app.api.helpers.utilities import dasherize
from app.api.helpers.permissions import jwt_required
@@ -125,14 +126,26 @@
:return:
"""
query_ = self.session.query(Speaker)
- if view_kwargs.get('event_identifier'):
- event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
- view_kwargs['event_id'] = event.id
if view_kwargs.get('event_id'):
event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
- query_ = query_.join(Event).filter(Event.id == event.id)
- if not has_access('is_coorganizer', event_id=event.id):
- query_ = query_.filter(Event.state == "published")
+ if event.state != 'published':
+ if 'Authorization' in request.headers and has_access('is_coorganizer', event_id=event.id):
+ query_ = query_.join(Event).filter(Event.id == event.id)
+ else:
+ raise ObjectNotFound({'parameter': 'event_id'},
+ "Event: {} not found".format(view_kwargs['event_identifier']))
+ else:
+ query_ = query_.join(Event).filter(Event.id == event.id)
+ elif view_kwargs.get('event_identifier'):
+ event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
+ if event.state != 'published':
+ if 'Authorization' in request.headers and has_access('is_coorganizer', event_id=event.id):
+ query_ = query_.join(Event).filter(Event.id == event.id)
+ else:
+ raise ObjectNotFound({'parameter': 'event_identifier'},
+ "Event: {} not found".format(view_kwargs['event_identifier']))
+ else:
+ query_ = query_.join(Event).filter(Event.id == event.id)
if view_kwargs.get('user_id'):
user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
@@ -142,7 +155,7 @@
session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
# session-speaker :: many-to-many relationship
query_ = Speaker.query.filter(Speaker.sessions.any(id=session.id))
- if not has_access('is_coorganizer', event_id=session.event_id):
+ if 'Authorization' in request.headers and not has_access('is_coorganizer', event_id=session.event_id):
if not has_access('is_session_self_submitted', session_id=session.id):
query_ = query_.filter(Session.state == "approved" or Session.state == "accepted")
| {"golden_diff": "diff --git a/app/api/speakers.py b/app/api/speakers.py\n--- a/app/api/speakers.py\n+++ b/app/api/speakers.py\n@@ -2,6 +2,7 @@\n from marshmallow_jsonapi.flask import Schema, Relationship\n from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n from flask_rest_jsonapi.exceptions import ObjectNotFound\n+from flask import request\n \n from app.api.helpers.utilities import dasherize\n from app.api.helpers.permissions import jwt_required\n@@ -125,14 +126,26 @@\n :return:\n \"\"\"\n query_ = self.session.query(Speaker)\n- if view_kwargs.get('event_identifier'):\n- event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n- view_kwargs['event_id'] = event.id\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n- query_ = query_.join(Event).filter(Event.id == event.id)\n- if not has_access('is_coorganizer', event_id=event.id):\n- query_ = query_.filter(Event.state == \"published\")\n+ if event.state != 'published':\n+ if 'Authorization' in request.headers and has_access('is_coorganizer', event_id=event.id):\n+ query_ = query_.join(Event).filter(Event.id == event.id)\n+ else:\n+ raise ObjectNotFound({'parameter': 'event_id'},\n+ \"Event: {} not found\".format(view_kwargs['event_identifier']))\n+ else:\n+ query_ = query_.join(Event).filter(Event.id == event.id)\n+ elif view_kwargs.get('event_identifier'):\n+ event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n+ if event.state != 'published':\n+ if 'Authorization' in request.headers and has_access('is_coorganizer', event_id=event.id):\n+ query_ = query_.join(Event).filter(Event.id == event.id)\n+ else:\n+ raise ObjectNotFound({'parameter': 'event_identifier'},\n+ \"Event: {} not found\".format(view_kwargs['event_identifier']))\n+ else:\n+ query_ = query_.join(Event).filter(Event.id == event.id)\n \n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n@@ -142,7 +155,7 @@\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n # session-speaker :: many-to-many relationship\n query_ = Speaker.query.filter(Speaker.sessions.any(id=session.id))\n- if not has_access('is_coorganizer', event_id=session.event_id):\n+ if 'Authorization' in request.headers and not has_access('is_coorganizer', event_id=session.event_id):\n if not has_access('is_session_self_submitted', session_id=session.id):\n query_ = query_.filter(Session.state == \"approved\" or Session.state == \"accepted\")\n", "issue": "Fix 'auth required' for GET /speakers\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\n\r\n**Expected behavior:**\r\n<!-- Describe what the behavior would be without the bug. -->\r\n\r\n**Steps to reproduce:**\r\n<!-- If you are able to illustrate the bug or feature request with an example, please provide steps to reproduce -->\r\n\r\n**Related code:**\r\n\r\n```\r\ninsert any relevant code here else remove this section\r\n```\r\n\r\n**Other information:**\r\n<!-- List any other information that is relevant to your issue. 
Stack traces, related issues, suggestions on how to fix, Stack Overflow links, forum links, etc. -->\r\n\r\n**System information:** \r\n\r\n<!-- Add information about the system your facing this bug on. If you think this is irrelevant or if it's a UI bug or a feature request, please remove this section -->\r\n\r\n```\r\nYour operating system\r\n```\r\n\r\n```\r\noutput of `python --version`\r\n```\r\n\n", "before_files": [{"content": "from marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.helpers.permissions import jwt_required\nfrom app.models import db\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.user import User\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.bootstrap import api\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\n\n\nclass SpeakerSchema(Schema):\n \"\"\"\n Speaker Schema based on Speaker Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for speaker schema\n \"\"\"\n type_ = 'speaker'\n self_view = 'v1.speaker_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n email = fields.Str(required=True)\n photo_url = fields.Url(allow_none=True)\n thumbnail_image_url = fields.Url(allow_none=True)\n small_image_url = fields.Url(allow_none=True)\n icon_image_url = fields.Url(allow_none=True)\n short_biography = fields.Str(allow_none=True)\n long_biography = fields.Str(allow_none=True)\n speaking_experience = fields.Str(allow_none=True)\n mobile = fields.Str(allow_none=True)\n website = fields.Url(allow_none=True)\n twitter = fields.Url(allow_none=True)\n facebook = fields.Url(allow_none=True)\n github = fields.Url(allow_none=True)\n linkedin = fields.Url(allow_none=True)\n organisation = fields.Str(allow_none=True)\n is_featured = fields.Boolean(default=False)\n position = fields.Str(allow_none=True)\n country = fields.Str(allow_none=True)\n city = fields.Str(allow_none=True)\n gender = fields.Str(allow_none=True)\n heard_from = fields.Str(allow_none=True)\n sponsorship_required = fields.Str(allow_none=True)\n event = Relationship(attribute='event',\n self_view='v1.speaker_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'speaker_id': '<id>'},\n schema='EventSchema',\n type_='event')\n user = Relationship(attribute='user',\n self_view='v1.speaker_user',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.user_detail',\n related_view_kwargs={'speaker_id': '<id>'},\n schema='UserSchema',\n type_='user')\n sessions = Relationship(attribute='sessions',\n self_view='v1.speaker_session',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_list',\n related_view_kwargs={'speaker_id': '<id>'},\n schema='SessionSchema',\n many=True,\n type_='session')\n\n\nclass SpeakerListPost(ResourceList):\n \"\"\"\n List and create speakers\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n method to add user_id to view_kwargs before post\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'user'], data)\n\n if not has_access('is_coorganizer', event_id=data['event']):\n event = 
safe_query(self, Event, 'id', data['event'], 'event_id')\n if event.state == \"draft\":\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event_id']))\n\n if 'sessions' in data:\n session_ids = data['sessions']\n for session_id in session_ids:\n if not has_access('is_session_self_submitted', session_id=session_id):\n raise ObjectNotFound({'parameter': 'session_id'},\n \"Session: {} not found\".format(session_id))\n\n schema = SpeakerSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': Speaker\n }\n\n\nclass SpeakerList(ResourceList):\n \"\"\"\n List speakers based on different params from view_kwargs\n \"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for speakers list class\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Speaker)\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n view_kwargs['event_id'] = event.id\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n query_ = query_.join(Event).filter(Event.id == event.id)\n if not has_access('is_coorganizer', event_id=event.id):\n query_ = query_.filter(Event.state == \"published\")\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n query_ = query_.join(User).filter(User.id == user.id)\n\n if view_kwargs.get('session_id'):\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n # session-speaker :: many-to-many relationship\n query_ = Speaker.query.filter(Speaker.sessions.any(id=session.id))\n if not has_access('is_coorganizer', event_id=session.event_id):\n if not has_access('is_session_self_submitted', session_id=session.id):\n query_ = query_.filter(Session.state == \"approved\" or Session.state == \"accepted\")\n\n return query_\n\n view_kwargs = True\n schema = SpeakerSchema\n methods = ['GET', ]\n data_layer = {'session': db.session,\n 'model': Speaker,\n 'methods': {\n 'query': query,\n }}\n\n\nclass SpeakerDetail(ResourceDetail):\n \"\"\"\n Speakers Detail by id\n \"\"\"\n decorators = (api.has_permission('is_coorganizer_or_user_itself', methods=\"PATCH,DELETE\", fetch=\"event_id\",\n fetch_as=\"event_id\", model=Speaker, check=lambda a: a.get('id') is not None),)\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': Speaker}\n\n\nclass SpeakerRelationshipRequired(ResourceRelationship):\n \"\"\"\n Speaker Relationship class\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': Speaker}\n\n\nclass SpeakerRelationshipOptional(ResourceRelationship):\n \"\"\"\n Speaker Relationship class\n \"\"\"\n decorators = (jwt_required,)\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': Speaker}\n", "path": "app/api/speakers.py"}]} | 2,740 | 675 |
gh_patches_debug_23822 | rasdani/github-patches | git_diff | pytorch__vision-3396 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ResNet FPN backbone
## 🐛 Bug
In https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L99, it says
> freeze layers only if pretrained backbone is used.
Therefore, if a pretrained backbone is NOT used, no layers should be frozen.
But when I run:
```
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
resnet_fpn_backbone('resnet50', pretrained=False, norm_layer=None, trainable_layers=5)
```
I can see that the parameters `bn1.weight, bn1.bias, fc.weight, fc.bias` are frozen at [line 101](https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L101).
## To Reproduce
Steps to reproduce the behavior:
Run
```
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
resnet_fpn_backbone('resnet50', pretrained=False, norm_layer=None, trainable_layers=5)
```
and see that at [line 101](https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L101) the parameters `bn1.weight, bn1.bias, fc.weight, fc.bias` are frozen even though `trainable_layers=5`, which should correspond to no frozen layers, as stated at [line 83](https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L83).
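A quick way to confirm which parameters end up frozen — an illustrative sketch, assuming the same torchvision build as above:
```
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone

# trainable_layers=5 is documented as "all backbone layers are trainable"
fpn_backbone = resnet_fpn_backbone('resnet50', pretrained=False, norm_layer=None, trainable_layers=5)

# Collect every parameter that was switched to requires_grad=False
frozen = [name for name, p in fpn_backbone.named_parameters() if not p.requires_grad]
print(frozen)  # expected: [], observed: entries such as 'body.bn1.weight' and 'body.bn1.bias'
```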
## Expected behavior
No layers are frozen.
## Environment
PyTorch version: 1.7.0
Is debug build: True
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 10.15.7 (x86_64)
GCC version: Could not collect
Clang version: 12.0.0 (clang-1200.0.32.29)
CMake version: version 3.18.2
Python version: 3.8 (64-bit runtime)
Is CUDA available: False
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Versions of relevant libraries:
[pip3] numpy==1.19.2
[pip3] pytorch-lightning==1.1.1
[pip3] pytorch-lightning-bolts==0.2.5rc1
[pip3] torch==1.7.0
[pip3] torchvision==0.8.1
[conda] blas 1.0 mkl
[conda] mkl 2019.5 281 conda-forge
[conda] mkl-service 2.3.0 py38h0b31af3_0 conda-forge
[conda] mkl_fft 1.2.0 py38hc64f4ea_0
[conda] mkl_random 1.1.1 py38h959d312_0
[conda] numpy 1.19.2 py38h456fd55_0
[conda] numpy-base 1.19.2 py38hcfb5961_0
[conda] pytorch 1.7.0 py3.8_0 pytorch
[conda] pytorch-lightning 1.1.1 pyhd8ed1ab_0 conda-forge
[conda] pytorch-lightning-bolts 0.2.5rc1 pypi_0 pypi
[conda] torchvision 0.8.1 py38_cpu pytorch
</issue>
<code>
[start of torchvision/models/detection/backbone_utils.py]
1 import warnings
2 from torch import nn
3 from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool
4
5 from torchvision.ops import misc as misc_nn_ops
6 from .._utils import IntermediateLayerGetter
7 from .. import mobilenet
8 from .. import resnet
9
10
11 class BackboneWithFPN(nn.Module):
12 """
13 Adds a FPN on top of a model.
14 Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
15 extract a submodel that returns the feature maps specified in return_layers.
16 The same limitations of IntermediatLayerGetter apply here.
17 Args:
18 backbone (nn.Module)
19 return_layers (Dict[name, new_name]): a dict containing the names
20 of the modules for which the activations will be returned as
21 the key of the dict, and the value of the dict is the name
22 of the returned activation (which the user can specify).
23 in_channels_list (List[int]): number of channels for each feature map
24 that is returned, in the order they are present in the OrderedDict
25 out_channels (int): number of channels in the FPN.
26 Attributes:
27 out_channels (int): the number of channels in the FPN
28 """
29 def __init__(self, backbone, return_layers, in_channels_list, out_channels, extra_blocks=None):
30 super(BackboneWithFPN, self).__init__()
31
32 if extra_blocks is None:
33 extra_blocks = LastLevelMaxPool()
34
35 self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
36 self.fpn = FeaturePyramidNetwork(
37 in_channels_list=in_channels_list,
38 out_channels=out_channels,
39 extra_blocks=extra_blocks,
40 )
41 self.out_channels = out_channels
42
43 def forward(self, x):
44 x = self.body(x)
45 x = self.fpn(x)
46 return x
47
48
49 def resnet_fpn_backbone(
50 backbone_name,
51 pretrained,
52 norm_layer=misc_nn_ops.FrozenBatchNorm2d,
53 trainable_layers=3,
54 returned_layers=None,
55 extra_blocks=None
56 ):
57 """
58 Constructs a specified ResNet backbone with FPN on top. Freezes the specified number of layers in the backbone.
59
60 Examples::
61
62 >>> from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
63 >>> backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=3)
64 >>> # get some dummy image
65 >>> x = torch.rand(1,3,64,64)
66 >>> # compute the output
67 >>> output = backbone(x)
68 >>> print([(k, v.shape) for k, v in output.items()])
69 >>> # returns
70 >>> [('0', torch.Size([1, 256, 16, 16])),
71 >>> ('1', torch.Size([1, 256, 8, 8])),
72 >>> ('2', torch.Size([1, 256, 4, 4])),
73 >>> ('3', torch.Size([1, 256, 2, 2])),
74 >>> ('pool', torch.Size([1, 256, 1, 1]))]
75
76 Args:
77 backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',
78 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
79 pretrained (bool): If True, returns a model with backbone pre-trained on Imagenet
80 norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:
81 (https://github.com/facebookresearch/maskrcnn-benchmark/issues/267)
82 trainable_layers (int): number of trainable (not frozen) resnet layers starting from final block.
83 Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
84 returned_layers (list of int): The layers of the network to return. Each entry must be in ``[1, 4]``.
85 By default all layers are returned.
86 extra_blocks (ExtraFPNBlock or None): if provided, extra operations will
87 be performed. It is expected to take the fpn features, the original
88 features and the names of the original features as input, and returns
89 a new list of feature maps and their corresponding names. By
90 default a ``LastLevelMaxPool`` is used.
91 """
92 backbone = resnet.__dict__[backbone_name](
93 pretrained=pretrained,
94 norm_layer=norm_layer)
95
96 # select layers that wont be frozen
97 assert 0 <= trainable_layers <= 5
98 layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
99 # freeze layers only if pretrained backbone is used
100 for name, parameter in backbone.named_parameters():
101 if all([not name.startswith(layer) for layer in layers_to_train]):
102 parameter.requires_grad_(False)
103
104 if extra_blocks is None:
105 extra_blocks = LastLevelMaxPool()
106
107 if returned_layers is None:
108 returned_layers = [1, 2, 3, 4]
109 assert min(returned_layers) > 0 and max(returned_layers) < 5
110 return_layers = {f'layer{k}': str(v) for v, k in enumerate(returned_layers)}
111
112 in_channels_stage2 = backbone.inplanes // 8
113 in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
114 out_channels = 256
115 return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)
116
117
118 def _validate_trainable_layers(pretrained, trainable_backbone_layers, max_value, default_value):
119 # dont freeze any layers if pretrained model or backbone is not used
120 if not pretrained:
121 if trainable_backbone_layers is not None:
122 warnings.warn(
123 "Changing trainable_backbone_layers has not effect if "
124 "neither pretrained nor pretrained_backbone have been set to True, "
125 "falling back to trainable_backbone_layers={} so that all layers are trainable".format(max_value))
126 trainable_backbone_layers = max_value
127
128 # by default freeze first blocks
129 if trainable_backbone_layers is None:
130 trainable_backbone_layers = default_value
131 assert 0 <= trainable_backbone_layers <= max_value
132 return trainable_backbone_layers
133
134
135 def mobilenet_backbone(
136 backbone_name,
137 pretrained,
138 fpn,
139 norm_layer=misc_nn_ops.FrozenBatchNorm2d,
140 trainable_layers=2,
141 returned_layers=None,
142 extra_blocks=None
143 ):
144 backbone = mobilenet.__dict__[backbone_name](pretrained=pretrained, norm_layer=norm_layer).features
145
146 # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
147 # The first and last blocks are always included because they are the C0 (conv1) and Cn.
148 stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
149 num_stages = len(stage_indices)
150
151 # find the index of the layer from which we wont freeze
152 assert 0 <= trainable_layers <= num_stages
153 freeze_before = num_stages if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
154
155 # freeze layers only if pretrained backbone is used
156 for b in backbone[:freeze_before]:
157 for parameter in b.parameters():
158 parameter.requires_grad_(False)
159
160 out_channels = 256
161 if fpn:
162 if extra_blocks is None:
163 extra_blocks = LastLevelMaxPool()
164
165 if returned_layers is None:
166 returned_layers = [num_stages - 2, num_stages - 1]
167 assert min(returned_layers) >= 0 and max(returned_layers) < num_stages
168 return_layers = {f'{stage_indices[k]}': str(v) for v, k in enumerate(returned_layers)}
169
170 in_channels_list = [backbone[stage_indices[i]].out_channels for i in returned_layers]
171 return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)
172 else:
173 m = nn.Sequential(
174 backbone,
175 # depthwise linear combination of channels to reduce their size
176 nn.Conv2d(backbone[-1].out_channels, out_channels, 1),
177 )
178 m.out_channels = out_channels
179 return m
180
[end of torchvision/models/detection/backbone_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py
--- a/torchvision/models/detection/backbone_utils.py
+++ b/torchvision/models/detection/backbone_utils.py
@@ -96,7 +96,8 @@
# select layers that wont be frozen
assert 0 <= trainable_layers <= 5
layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
- # freeze layers only if pretrained backbone is used
+ if trainable_layers == 5:
+ layers_to_train.append('bn1')
for name, parameter in backbone.named_parameters():
if all([not name.startswith(layer) for layer in layers_to_train]):
parameter.requires_grad_(False)
@@ -152,7 +153,6 @@
assert 0 <= trainable_layers <= num_stages
freeze_before = num_stages if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
- # freeze layers only if pretrained backbone is used
for b in backbone[:freeze_before]:
for parameter in b.parameters():
parameter.requires_grad_(False)
| {"golden_diff": "diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py\n--- a/torchvision/models/detection/backbone_utils.py\n+++ b/torchvision/models/detection/backbone_utils.py\n@@ -96,7 +96,8 @@\n # select layers that wont be frozen\n assert 0 <= trainable_layers <= 5\n layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]\n- # freeze layers only if pretrained backbone is used\n+ if trainable_layers == 5:\n+ layers_to_train.append('bn1')\n for name, parameter in backbone.named_parameters():\n if all([not name.startswith(layer) for layer in layers_to_train]):\n parameter.requires_grad_(False)\n@@ -152,7 +153,6 @@\n assert 0 <= trainable_layers <= num_stages\n freeze_before = num_stages if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]\n \n- # freeze layers only if pretrained backbone is used\n for b in backbone[:freeze_before]:\n for parameter in b.parameters():\n parameter.requires_grad_(False)\n", "issue": "ResNet FPN backbone\n## \ud83d\udc1b Bug\r\n\r\nIn https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L99, it says\r\n\r\n> freeze layers only if pretrained backbone is used.\r\n\r\nTherefore, if pretrained backbone is NOT used, any layers should be frozen.\r\n\r\nBut when I run:\r\n```\r\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\r\nresnet_fpn_backbone('resnet50', pretrained=False, norm_layer=None, trainable_layers=5)\r\n```\r\nI can see that parameters `bn1.weight, bn1.bias, fc.weight, fc.bias` are frozen on the [line 101](https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L101).\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nRun \r\n```\r\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\r\nresnet_fpn_backbone('resnet50', pretrained=False, norm_layer=None, trainable_layers=5)\r\n```\r\n\r\nand see that on the [line 101](https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L101) parameters `bn1.weight, bn1.bias, fc.weight, fc.bias` are frozen despite that `trainable_layers=5` which should correspond to no frozen layers as written on the [line 83](https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L83).\r\n\r\n\r\n## Expected behavior\r\n\r\nNo layers are frozen.\r\n\r\n## Environment\r\n\r\nPyTorch version: 1.7.0\r\nIs debug build: True\r\nCUDA used to build PyTorch: None\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: macOS 10.15.7 (x86_64)\r\nGCC version: Could not collect\r\nClang version: 12.0.0 (clang-1200.0.32.29)\r\nCMake version: version 3.18.2\r\n\r\nPython version: 3.8 (64-bit runtime)\r\nIs CUDA available: False\r\nCUDA runtime version: No CUDA\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.19.2\r\n[pip3] pytorch-lightning==1.1.1\r\n[pip3] pytorch-lightning-bolts==0.2.5rc1\r\n[pip3] torch==1.7.0\r\n[pip3] torchvision==0.8.1\r\n[conda] blas 1.0 mkl\r\n[conda] mkl 2019.5 281 conda-forge\r\n[conda] mkl-service 2.3.0 py38h0b31af3_0 conda-forge\r\n[conda] mkl_fft 1.2.0 py38hc64f4ea_0\r\n[conda] mkl_random 1.1.1 py38h959d312_0\r\n[conda] numpy 1.19.2 py38h456fd55_0\r\n[conda] numpy-base 1.19.2 py38hcfb5961_0\r\n[conda] pytorch 1.7.0 py3.8_0 
pytorch\r\n[conda] pytorch-lightning 1.1.1 pyhd8ed1ab_0 conda-forge\r\n[conda] pytorch-lightning-bolts 0.2.5rc1 pypi_0 pypi\r\n[conda] torchvision 0.8.1 py38_cpu pytorch\r\n\r\n\n", "before_files": [{"content": "import warnings\nfrom torch import nn\nfrom torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool\n\nfrom torchvision.ops import misc as misc_nn_ops\nfrom .._utils import IntermediateLayerGetter\nfrom .. import mobilenet\nfrom .. import resnet\n\n\nclass BackboneWithFPN(nn.Module):\n \"\"\"\n Adds a FPN on top of a model.\n Internally, it uses torchvision.models._utils.IntermediateLayerGetter to\n extract a submodel that returns the feature maps specified in return_layers.\n The same limitations of IntermediatLayerGetter apply here.\n Args:\n backbone (nn.Module)\n return_layers (Dict[name, new_name]): a dict containing the names\n of the modules for which the activations will be returned as\n the key of the dict, and the value of the dict is the name\n of the returned activation (which the user can specify).\n in_channels_list (List[int]): number of channels for each feature map\n that is returned, in the order they are present in the OrderedDict\n out_channels (int): number of channels in the FPN.\n Attributes:\n out_channels (int): the number of channels in the FPN\n \"\"\"\n def __init__(self, backbone, return_layers, in_channels_list, out_channels, extra_blocks=None):\n super(BackboneWithFPN, self).__init__()\n\n if extra_blocks is None:\n extra_blocks = LastLevelMaxPool()\n\n self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)\n self.fpn = FeaturePyramidNetwork(\n in_channels_list=in_channels_list,\n out_channels=out_channels,\n extra_blocks=extra_blocks,\n )\n self.out_channels = out_channels\n\n def forward(self, x):\n x = self.body(x)\n x = self.fpn(x)\n return x\n\n\ndef resnet_fpn_backbone(\n backbone_name,\n pretrained,\n norm_layer=misc_nn_ops.FrozenBatchNorm2d,\n trainable_layers=3,\n returned_layers=None,\n extra_blocks=None\n):\n \"\"\"\n Constructs a specified ResNet backbone with FPN on top. Freezes the specified number of layers in the backbone.\n\n Examples::\n\n >>> from torchvision.models.detection.backbone_utils import resnet_fpn_backbone\n >>> backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=3)\n >>> # get some dummy image\n >>> x = torch.rand(1,3,64,64)\n >>> # compute the output\n >>> output = backbone(x)\n >>> print([(k, v.shape) for k, v in output.items()])\n >>> # returns\n >>> [('0', torch.Size([1, 256, 16, 16])),\n >>> ('1', torch.Size([1, 256, 8, 8])),\n >>> ('2', torch.Size([1, 256, 4, 4])),\n >>> ('3', torch.Size([1, 256, 2, 2])),\n >>> ('pool', torch.Size([1, 256, 1, 1]))]\n\n Args:\n backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',\n 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'\n pretrained (bool): If True, returns a model with backbone pre-trained on Imagenet\n norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:\n (https://github.com/facebookresearch/maskrcnn-benchmark/issues/267)\n trainable_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.\n returned_layers (list of int): The layers of the network to return. 
Each entry must be in ``[1, 4]``.\n By default all layers are returned.\n extra_blocks (ExtraFPNBlock or None): if provided, extra operations will\n be performed. It is expected to take the fpn features, the original\n features and the names of the original features as input, and returns\n a new list of feature maps and their corresponding names. By\n default a ``LastLevelMaxPool`` is used.\n \"\"\"\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained,\n norm_layer=norm_layer)\n\n # select layers that wont be frozen\n assert 0 <= trainable_layers <= 5\n layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]\n # freeze layers only if pretrained backbone is used\n for name, parameter in backbone.named_parameters():\n if all([not name.startswith(layer) for layer in layers_to_train]):\n parameter.requires_grad_(False)\n\n if extra_blocks is None:\n extra_blocks = LastLevelMaxPool()\n\n if returned_layers is None:\n returned_layers = [1, 2, 3, 4]\n assert min(returned_layers) > 0 and max(returned_layers) < 5\n return_layers = {f'layer{k}': str(v) for v, k in enumerate(returned_layers)}\n\n in_channels_stage2 = backbone.inplanes // 8\n in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]\n out_channels = 256\n return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)\n\n\ndef _validate_trainable_layers(pretrained, trainable_backbone_layers, max_value, default_value):\n # dont freeze any layers if pretrained model or backbone is not used\n if not pretrained:\n if trainable_backbone_layers is not None:\n warnings.warn(\n \"Changing trainable_backbone_layers has not effect if \"\n \"neither pretrained nor pretrained_backbone have been set to True, \"\n \"falling back to trainable_backbone_layers={} so that all layers are trainable\".format(max_value))\n trainable_backbone_layers = max_value\n\n # by default freeze first blocks\n if trainable_backbone_layers is None:\n trainable_backbone_layers = default_value\n assert 0 <= trainable_backbone_layers <= max_value\n return trainable_backbone_layers\n\n\ndef mobilenet_backbone(\n backbone_name,\n pretrained,\n fpn,\n norm_layer=misc_nn_ops.FrozenBatchNorm2d,\n trainable_layers=2,\n returned_layers=None,\n extra_blocks=None\n):\n backbone = mobilenet.__dict__[backbone_name](pretrained=pretrained, norm_layer=norm_layer).features\n\n # Gather the indices of blocks which are strided. 
These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n num_stages = len(stage_indices)\n\n # find the index of the layer from which we wont freeze\n assert 0 <= trainable_layers <= num_stages\n freeze_before = num_stages if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]\n\n # freeze layers only if pretrained backbone is used\n for b in backbone[:freeze_before]:\n for parameter in b.parameters():\n parameter.requires_grad_(False)\n\n out_channels = 256\n if fpn:\n if extra_blocks is None:\n extra_blocks = LastLevelMaxPool()\n\n if returned_layers is None:\n returned_layers = [num_stages - 2, num_stages - 1]\n assert min(returned_layers) >= 0 and max(returned_layers) < num_stages\n return_layers = {f'{stage_indices[k]}': str(v) for v, k in enumerate(returned_layers)}\n\n in_channels_list = [backbone[stage_indices[i]].out_channels for i in returned_layers]\n return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)\n else:\n m = nn.Sequential(\n backbone,\n # depthwise linear combination of channels to reduce their size\n nn.Conv2d(backbone[-1].out_channels, out_channels, 1),\n )\n m.out_channels = out_channels\n return m\n", "path": "torchvision/models/detection/backbone_utils.py"}]} | 3,771 | 263 |
gh_patches_debug_20797 | rasdani/github-patches | git_diff | ansible-collections__community.general-6370 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't use CreateBiosConfigJob command from idrac_redfish_command module
### Summary
I'm trying to create a job for BIOS configuration.
In the documentation example, the specified resource_id is System.Embedded.1:
https://github.com/ansible-collections/community.general/blob/926c0a71d03a447580b6a034432608c632477059/plugins/modules/remote_management/redfish/idrac_redfish_command.py#L68
When I use this resource, I get an error message:
> Manager resource System.Embedded.1 not found
explained by:
https://github.com/ansible-collections/community.general/blob/926c0a71d03a447580b6a034432608c632477059/plugins/modules/remote_management/redfish/idrac_redfish_command.py#L201
And of course, if I use iDRAC.Embedded.1, which is the actual manager resource ID, the task fails with:
> System resource iDRAC.Embedded.1 not found
explained by:
https://github.com/ansible-collections/community.general/blob/926c0a71d03a447580b6a034432608c632477059/plugins/modules/remote_management/redfish/idrac_redfish_command.py#L194
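Looking at the module source quoted further down, both errors appear to come from the same `resource_id` being used for two different lookups — roughly this flow (simplified sketch, not the exact module code):
```
# category == "Systems" branch of main(), simplified
result = rf_utils._find_systems_resource()        # expects resource_id to name a System (System.Embedded.1)
...
if command == "CreateBiosConfigJob":
    result = rf_utils._find_managers_resource()   # expects the same resource_id to name a Manager (iDRAC.Embedded.1)
```
So whichever ID is supplied, one of the two lookups fails, which matches the two error messages above.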
### Issue Type
Bug Report
### Component Name
idrac_redfish_command
### Ansible Version
```console (paste below)
$ ansible --version
ansible 2.10.5
config file = /home/pyfontan/.ansible.cfg
configured module search path = ['/home/pyfontan/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/pyfontan/virtualenv/ansible/lib/python3.8/site-packages/ansible
executable location = /home/pyfontan/virtualenv/ansible/bin/ansible
python version = 3.8.6 (default, Jan 27 2021, 15:42:20) [GCC 10.2.0]
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
iDRAC 4.32.20.00 on PowerEdge C6525
or
iDRAC 3.21.26.22 on PowerEdge R740xd
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: Create BIOS configuration job (schedule BIOS setting update)
community.general.idrac_redfish_command:
category: Systems
command: CreateBiosConfigJob
#resource_id: "{{asset_system_resource_name}}"
#resource_id: "System.Embedded.1"
resource_id: "iDRAC.Embedded.1"
baseuri: "{{ asset.manager.ip }}"
username: "{{ asset.manager.user }}"
password: "{{ asset.manager.password }}"
```
### Expected Results
I expect a BIOS configuration job to be created.
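For illustration, a successful run registered as below should report the action result — a sketch only, since the exact return fields vary between module releases (`msg` is the field documented in the module's RETURN block quoted below):

```yaml
- name: Create BIOS configuration job
  community.general.idrac_redfish_command:
    category: Systems
    command: CreateBiosConfigJob
    baseuri: "{{ asset.manager.ip }}"
    username: "{{ asset.manager.user }}"
    password: "{{ asset.manager.password }}"
  register: bios_job

- name: Show result
  ansible.builtin.debug:
    msg: "{{ bios_job.msg }}"   # e.g. "Action was successful"
```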
### Actual Results
```console (paste below)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/modules/idrac_redfish_command.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright (c) 2018 Dell EMC Inc.
5 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
6 # SPDX-License-Identifier: GPL-3.0-or-later
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11 DOCUMENTATION = '''
12 ---
13 module: idrac_redfish_command
14 short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
15 description:
16 - Builds Redfish URIs locally and sends them to remote OOB controllers to
17 perform an action.
18 - For use with Dell iDRAC operations that require Redfish OEM extensions.
19 extends_documentation_fragment:
20 - community.general.attributes
21 attributes:
22 check_mode:
23 support: none
24 diff_mode:
25 support: none
26 options:
27 category:
28 required: true
29 description:
30 - Category to execute on iDRAC.
31 type: str
32 command:
33 required: true
34 description:
35 - List of commands to execute on iDRAC.
36 type: list
37 elements: str
38 baseuri:
39 required: true
40 description:
41 - Base URI of iDRAC.
42 type: str
43 username:
44 description:
45 - Username for authenticating to iDRAC.
46 type: str
47 password:
48 description:
49 - Password for authenticating to iDRAC.
50 type: str
51 auth_token:
52 description:
53 - Security token for authenticating to iDRAC.
54 type: str
55 version_added: 2.3.0
56 timeout:
57 description:
58 - Timeout in seconds for HTTP requests to iDRAC.
59 default: 10
60 type: int
61 resource_id:
62 required: false
63 description:
64 - ID of the System, Manager or Chassis to modify.
65 type: str
66 version_added: '0.2.0'
67
68 author: "Jose Delarosa (@jose-delarosa)"
69 '''
70
71 EXAMPLES = '''
72 - name: Create BIOS configuration job (schedule BIOS setting update)
73 community.general.idrac_redfish_command:
74 category: Systems
75 command: CreateBiosConfigJob
76 resource_id: System.Embedded.1
77 baseuri: "{{ baseuri }}"
78 username: "{{ username }}"
79 password: "{{ password }}"
80 '''
81
82 RETURN = '''
83 msg:
84 description: Message with action result or error description
85 returned: always
86 type: str
87 sample: "Action was successful"
88 return_values:
89 description: Dictionary containing command-specific response data from the action.
90 returned: on success
91 type: dict
92 version_added: 6.6.0
93 sample: {
94 "job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011"
95 }
96 '''
97
98 import re
99 from ansible.module_utils.basic import AnsibleModule
100 from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
101 from ansible.module_utils.common.text.converters import to_native
102
103
104 class IdracRedfishUtils(RedfishUtils):
105
106 def create_bios_config_job(self):
107 result = {}
108 key = "Bios"
109 jobs = "Jobs"
110
111 # Search for 'key' entry and extract URI from it
112 response = self.get_request(self.root_uri + self.systems_uris[0])
113 if response['ret'] is False:
114 return response
115 result['ret'] = True
116 data = response['data']
117
118 if key not in data:
119 return {'ret': False, 'msg': "Key %s not found" % key}
120
121 bios_uri = data[key]["@odata.id"]
122
123 # Extract proper URI
124 response = self.get_request(self.root_uri + bios_uri)
125 if response['ret'] is False:
126 return response
127 result['ret'] = True
128 data = response['data']
129 set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
130 "@odata.id"]
131
132 payload = {"TargetSettingsURI": set_bios_attr_uri}
133 response = self.post_request(
134 self.root_uri + self.manager_uri + "/" + jobs, payload)
135 if response['ret'] is False:
136 return response
137
138 response_output = response['resp'].__dict__
139 job_id_full = response_output["headers"]["Location"]
140 job_id = re.search("JID_.+", job_id_full).group()
141 return {'ret': True, 'msg': "Config job %s created" % job_id, 'job_id': job_id_full}
142
143
144 CATEGORY_COMMANDS_ALL = {
145 "Systems": ["CreateBiosConfigJob"],
146 "Accounts": [],
147 "Manager": []
148 }
149
150
151 def main():
152 result = {}
153 return_values = {}
154 module = AnsibleModule(
155 argument_spec=dict(
156 category=dict(required=True),
157 command=dict(required=True, type='list', elements='str'),
158 baseuri=dict(required=True),
159 username=dict(),
160 password=dict(no_log=True),
161 auth_token=dict(no_log=True),
162 timeout=dict(type='int', default=10),
163 resource_id=dict()
164 ),
165 required_together=[
166 ('username', 'password'),
167 ],
168 required_one_of=[
169 ('username', 'auth_token'),
170 ],
171 mutually_exclusive=[
172 ('username', 'auth_token'),
173 ],
174 supports_check_mode=False
175 )
176
177 category = module.params['category']
178 command_list = module.params['command']
179
180 # admin credentials used for authentication
181 creds = {'user': module.params['username'],
182 'pswd': module.params['password'],
183 'token': module.params['auth_token']}
184
185 # timeout
186 timeout = module.params['timeout']
187
188 # System, Manager or Chassis ID to modify
189 resource_id = module.params['resource_id']
190
191 # Build root URI
192 root_uri = "https://" + module.params['baseuri']
193 rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
194 resource_id=resource_id, data_modification=True)
195
196 # Check that Category is valid
197 if category not in CATEGORY_COMMANDS_ALL:
198 module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
199
200 # Check that all commands are valid
201 for cmd in command_list:
202 # Fail if even one command given is invalid
203 if cmd not in CATEGORY_COMMANDS_ALL[category]:
204 module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
205
206 # Organize by Categories / Commands
207
208 if category == "Systems":
209 # execute only if we find a System resource
210 result = rf_utils._find_systems_resource()
211 if result['ret'] is False:
212 module.fail_json(msg=to_native(result['msg']))
213
214 for command in command_list:
215 if command == "CreateBiosConfigJob":
216 # execute only if we find a Managers resource
217 result = rf_utils._find_managers_resource()
218 if result['ret'] is False:
219 module.fail_json(msg=to_native(result['msg']))
220 result = rf_utils.create_bios_config_job()
221 if 'job_id' in result:
222 return_values['job_id'] = result['job_id']
223
224 # Return data back or fail with proper message
225 if result['ret'] is True:
226 del result['ret']
227 module.exit_json(changed=True, msg='Action was successful', return_values=return_values)
228 else:
229 module.fail_json(msg=to_native(result['msg']))
230
231
232 if __name__ == '__main__':
233 main()
234
[end of plugins/modules/idrac_redfish_command.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py
--- a/plugins/modules/idrac_redfish_command.py
+++ b/plugins/modules/idrac_redfish_command.py
@@ -199,7 +199,20 @@
if category == "Systems":
# execute only if we find a System resource
+ # NOTE: Currently overriding the usage of 'data_modification' due to
+ # how 'resource_id' is processed. In the case of CreateBiosConfigJob,
+ # we interact with BOTH systems and managers, so you currently cannot
+ # specify a single 'resource_id' to make both '_find_systems_resource'
+ # and '_find_managers_resource' return success. Since
+ # CreateBiosConfigJob doesn't use the matched 'resource_id' for a
+ # system regardless of what's specified, disabling the 'resource_id'
+ # inspection for the next call allows a specific manager to be
+ # specified with 'resource_id'. If we ever need to expand the input
+ # to inspect a specific system and manager in parallel, this will need
+ # updates.
+ rf_utils.data_modification = False
result = rf_utils._find_systems_resource()
+ rf_utils.data_modification = True
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
| {"golden_diff": "diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py\n--- a/plugins/modules/idrac_redfish_command.py\n+++ b/plugins/modules/idrac_redfish_command.py\n@@ -199,7 +199,20 @@\n \n if category == \"Systems\":\n # execute only if we find a System resource\n+ # NOTE: Currently overriding the usage of 'data_modification' due to\n+ # how 'resource_id' is processed. In the case of CreateBiosConfigJob,\n+ # we interact with BOTH systems and managers, so you currently cannot\n+ # specify a single 'resource_id' to make both '_find_systems_resource'\n+ # and '_find_managers_resource' return success. Since\n+ # CreateBiosConfigJob doesn't use the matched 'resource_id' for a\n+ # system regardless of what's specified, disabling the 'resource_id'\n+ # inspection for the next call allows a specific manager to be\n+ # specified with 'resource_id'. If we ever need to expand the input\n+ # to inspect a specific system and manager in parallel, this will need\n+ # updates.\n+ rf_utils.data_modification = False\n result = rf_utils._find_systems_resource()\n+ rf_utils.data_modification = True\n if result['ret'] is False:\n module.fail_json(msg=to_native(result['msg']))\n", "issue": "Can't use CreateBiosConfigJob command from idrac_redfish_command module\n### Summary\n\nI'm trying to create a job for BIOS configuration.\r\n\r\nIn the documentation example, specified resource_id is System.Embedded.1\r\nhttps://github.com/ansible-collections/community.general/blob/926c0a71d03a447580b6a034432608c632477059/plugins/modules/remote_management/redfish/idrac_redfish_command.py#L68\r\n\r\nWhen i use this resource, i have an error message : \r\n> Manager resource System.Embedded.1 not found\r\n\r\nexplained by:\r\nhttps://github.com/ansible-collections/community.general/blob/926c0a71d03a447580b6a034432608c632477059/plugins/modules/remote_management/redfish/idrac_redfish_command.py#L201\r\n\r\nAnd of course, if I use iDRAC.embedded.1 which is the actual manager resource ID, task fails with :\r\n> System resource iDRAC.Embedded.1 not found\r\n\r\nexplained by :\r\nhttps://github.com/ansible-collections/community.general/blob/926c0a71d03a447580b6a034432608c632477059/plugins/modules/remote_management/redfish/idrac_redfish_command.py#L194\r\n\r\n\r\n\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nidrac_redfish_command\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible 2.10.5\r\n config file = /home/pyfontan/.ansible.cfg\r\n configured module search path = ['/home/pyfontan/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/pyfontan/virtualenv/ansible/lib/python3.8/site-packages/ansible\r\n executable location = /home/pyfontan/virtualenv/ansible/bin/ansible\r\n python version = 3.8.6 (default, Jan 27 2021, 15:42:20) [GCC 10.2.0]\r\n\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\niDRAC 4.32.20.00 on PowerEdge C6525\r\nor\r\niDRAC 3.21.26.22 on PowerEdge R740xd\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: Create BIOS configuration job (schedule BIOS setting update)\r\n community.general.idrac_redfish_command:\r\n category: Systems\r\n command: CreateBiosConfigJob\r\n #resource_id: \"{{asset_system_resource_name}}\"\r\n #resource_id: \"System.Embedded.1\"\r\n resource_id: \"iDRAC.Embedded.1\"\r\n baseuri: \"{{ 
asset.manager.ip }}\"\r\n username: \"{{ asset.manager.user }}\"\r\n password: \"{{ asset.manager.password }}\"\r\n ```\r\n\n\n### Expected Results\n\nI expect to have a BIOS Job configuration created.\n\n### Actual Results\n\n```console (paste below)\r\n\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2018 Dell EMC Inc.\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: idrac_redfish_command\nshort_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs\ndescription:\n - Builds Redfish URIs locally and sends them to remote OOB controllers to\n perform an action.\n - For use with Dell iDRAC operations that require Redfish OEM extensions.\nextends_documentation_fragment:\n - community.general.attributes\nattributes:\n check_mode:\n support: none\n diff_mode:\n support: none\noptions:\n category:\n required: true\n description:\n - Category to execute on iDRAC.\n type: str\n command:\n required: true\n description:\n - List of commands to execute on iDRAC.\n type: list\n elements: str\n baseuri:\n required: true\n description:\n - Base URI of iDRAC.\n type: str\n username:\n description:\n - Username for authenticating to iDRAC.\n type: str\n password:\n description:\n - Password for authenticating to iDRAC.\n type: str\n auth_token:\n description:\n - Security token for authenticating to iDRAC.\n type: str\n version_added: 2.3.0\n timeout:\n description:\n - Timeout in seconds for HTTP requests to iDRAC.\n default: 10\n type: int\n resource_id:\n required: false\n description:\n - ID of the System, Manager or Chassis to modify.\n type: str\n version_added: '0.2.0'\n\nauthor: \"Jose Delarosa (@jose-delarosa)\"\n'''\n\nEXAMPLES = '''\n - name: Create BIOS configuration job (schedule BIOS setting update)\n community.general.idrac_redfish_command:\n category: Systems\n command: CreateBiosConfigJob\n resource_id: System.Embedded.1\n baseuri: \"{{ baseuri }}\"\n username: \"{{ username }}\"\n password: \"{{ password }}\"\n'''\n\nRETURN = '''\nmsg:\n description: Message with action result or error description\n returned: always\n type: str\n sample: \"Action was successful\"\nreturn_values:\n description: Dictionary containing command-specific response data from the action.\n returned: on success\n type: dict\n version_added: 6.6.0\n sample: {\n \"job_id\": \"/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011\"\n }\n'''\n\nimport re\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils\nfrom ansible.module_utils.common.text.converters import to_native\n\n\nclass IdracRedfishUtils(RedfishUtils):\n\n def create_bios_config_job(self):\n result = {}\n key = \"Bios\"\n jobs = \"Jobs\"\n\n # Search for 'key' entry and extract URI from it\n response = self.get_request(self.root_uri + self.systems_uris[0])\n if response['ret'] is False:\n return response\n result['ret'] = True\n data = response['data']\n\n if key not in data:\n return {'ret': False, 'msg': \"Key %s not found\" % key}\n\n bios_uri = data[key][\"@odata.id\"]\n\n # Extract proper URI\n response = self.get_request(self.root_uri + bios_uri)\n if 
response['ret'] is False:\n return response\n result['ret'] = True\n data = response['data']\n set_bios_attr_uri = data[\"@Redfish.Settings\"][\"SettingsObject\"][\n \"@odata.id\"]\n\n payload = {\"TargetSettingsURI\": set_bios_attr_uri}\n response = self.post_request(\n self.root_uri + self.manager_uri + \"/\" + jobs, payload)\n if response['ret'] is False:\n return response\n\n response_output = response['resp'].__dict__\n job_id_full = response_output[\"headers\"][\"Location\"]\n job_id = re.search(\"JID_.+\", job_id_full).group()\n return {'ret': True, 'msg': \"Config job %s created\" % job_id, 'job_id': job_id_full}\n\n\nCATEGORY_COMMANDS_ALL = {\n \"Systems\": [\"CreateBiosConfigJob\"],\n \"Accounts\": [],\n \"Manager\": []\n}\n\n\ndef main():\n result = {}\n return_values = {}\n module = AnsibleModule(\n argument_spec=dict(\n category=dict(required=True),\n command=dict(required=True, type='list', elements='str'),\n baseuri=dict(required=True),\n username=dict(),\n password=dict(no_log=True),\n auth_token=dict(no_log=True),\n timeout=dict(type='int', default=10),\n resource_id=dict()\n ),\n required_together=[\n ('username', 'password'),\n ],\n required_one_of=[\n ('username', 'auth_token'),\n ],\n mutually_exclusive=[\n ('username', 'auth_token'),\n ],\n supports_check_mode=False\n )\n\n category = module.params['category']\n command_list = module.params['command']\n\n # admin credentials used for authentication\n creds = {'user': module.params['username'],\n 'pswd': module.params['password'],\n 'token': module.params['auth_token']}\n\n # timeout\n timeout = module.params['timeout']\n\n # System, Manager or Chassis ID to modify\n resource_id = module.params['resource_id']\n\n # Build root URI\n root_uri = \"https://\" + module.params['baseuri']\n rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,\n resource_id=resource_id, data_modification=True)\n\n # Check that Category is valid\n if category not in CATEGORY_COMMANDS_ALL:\n module.fail_json(msg=to_native(\"Invalid Category '%s'. Valid Categories = %s\" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))\n\n # Check that all commands are valid\n for cmd in command_list:\n # Fail if even one command given is invalid\n if cmd not in CATEGORY_COMMANDS_ALL[category]:\n module.fail_json(msg=to_native(\"Invalid Command '%s'. Valid Commands = %s\" % (cmd, CATEGORY_COMMANDS_ALL[category])))\n\n # Organize by Categories / Commands\n\n if category == \"Systems\":\n # execute only if we find a System resource\n result = rf_utils._find_systems_resource()\n if result['ret'] is False:\n module.fail_json(msg=to_native(result['msg']))\n\n for command in command_list:\n if command == \"CreateBiosConfigJob\":\n # execute only if we find a Managers resource\n result = rf_utils._find_managers_resource()\n if result['ret'] is False:\n module.fail_json(msg=to_native(result['msg']))\n result = rf_utils.create_bios_config_job()\n if 'job_id' in result:\n return_values['job_id'] = result['job_id']\n\n # Return data back or fail with proper message\n if result['ret'] is True:\n del result['ret']\n module.exit_json(changed=True, msg='Action was successful', return_values=return_values)\n else:\n module.fail_json(msg=to_native(result['msg']))\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/idrac_redfish_command.py"}]} | 3,581 | 313 |
gh_patches_debug_1094 | rasdani/github-patches | git_diff | ESMCI__cime-4035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cheyenne needs a module load python
Now that we require Python 3.5+, we need to do a `module load python` on cheyenne.
The lack of this module load is responsible for a failure in `J_TestCreateNewcase.test_f_createnewcase_with_user_compset` if you run the whole `J_TestCreateNewcase` suite, and may cause other problems as well.
I'll get a fix in shortly.
</issue>
<code>
[start of scripts/Tools/standard_script_setup.py]
1 """
2 Encapsulate the importing of python utils and logging setup, things
3 that every script should do.
4 """
5 # pylint: disable=unused-import
6
7 import sys, os
8 import __main__ as main
9 _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..")
10 _LIB_DIR = os.path.join(_CIMEROOT, "scripts", "lib")
11 sys.path.append(_LIB_DIR)
12
13 # Important: Allows external tools to link up with CIME
14 os.environ["CIMEROOT"] = _CIMEROOT
15
16 import CIME.utils
17 CIME.utils.check_minimum_python_version(2, 7)
18 CIME.utils.stop_buffering_output()
19 import logging, argparse
20
[end of scripts/Tools/standard_script_setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/Tools/standard_script_setup.py b/scripts/Tools/standard_script_setup.py
--- a/scripts/Tools/standard_script_setup.py
+++ b/scripts/Tools/standard_script_setup.py
@@ -14,6 +14,6 @@
os.environ["CIMEROOT"] = _CIMEROOT
import CIME.utils
-CIME.utils.check_minimum_python_version(2, 7)
+CIME.utils.check_minimum_python_version(3, 6)
CIME.utils.stop_buffering_output()
import logging, argparse
| {"golden_diff": "diff --git a/scripts/Tools/standard_script_setup.py b/scripts/Tools/standard_script_setup.py\n--- a/scripts/Tools/standard_script_setup.py\n+++ b/scripts/Tools/standard_script_setup.py\n@@ -14,6 +14,6 @@\n os.environ[\"CIMEROOT\"] = _CIMEROOT\n \n import CIME.utils\n-CIME.utils.check_minimum_python_version(2, 7)\n+CIME.utils.check_minimum_python_version(3, 6)\n CIME.utils.stop_buffering_output()\n import logging, argparse\n", "issue": "cheyenne needs a module load python\nNow that we require python 3.5+, we need to do a module load python on cheyenne.\r\n\r\nThe lack of this module load is responsible for a failure in `J_TestCreateNewcase.test_f_createnewcase_with_user_compset` if you run the whole `J_TestCreateNewcase` suite, and may cause other problems as well.\r\n\r\nI'll get a fix in shortly.\n", "before_files": [{"content": "\"\"\"\nEncapsulate the importing of python utils and logging setup, things\nthat every script should do.\n\"\"\"\n# pylint: disable=unused-import\n\nimport sys, os\nimport __main__ as main\n_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\",\"..\")\n_LIB_DIR = os.path.join(_CIMEROOT, \"scripts\", \"lib\")\nsys.path.append(_LIB_DIR)\n\n# Important: Allows external tools to link up with CIME\nos.environ[\"CIMEROOT\"] = _CIMEROOT\n\nimport CIME.utils\nCIME.utils.check_minimum_python_version(2, 7)\nCIME.utils.stop_buffering_output()\nimport logging, argparse\n", "path": "scripts/Tools/standard_script_setup.py"}]} | 817 | 115 |
gh_patches_debug_33119 | rasdani/github-patches | git_diff | nilearn__nilearn-4334 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Globbing should be advertised in doc/examples.
</issue>
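(Editorial illustration, not part of the original report.) The feature the issue wants advertised is glob support: most nilearn functions that accept an image filename also accept a glob-style pattern. A minimal hedged sketch, where the `subj*` directory layout is an assumption made for illustration only:

```python
from nilearn import image

# Functions containing 'img' accept glob patterns as well as plain filenames.
# Matching 3D files are loaded together and returned as one 4D image,
# with one volume per matched file (the path pattern below is hypothetical).
smoothed_all = image.smooth_img("data/subj*/anat.nii.gz", fwhm=5)
print(smoothed_all.shape)  # (x, y, z, number_of_matched_files)
```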
<code>
[start of examples/00_tutorials/plot_nilearn_101.py]
1 """
2 Basic nilearn example: manipulating and looking at data
3 =======================================================
4
5 A simple example showing how to load an existing Nifti file and use
6 basic nilearn functionalities.
7 """
8
9 # Let us use a Nifti file that is shipped with nilearn
10 from nilearn.datasets import MNI152_FILE_PATH
11
12 # Note that the variable MNI152_FILE_PATH is just a path to a Nifti file
13 print(f"Path to MNI152 template: {MNI152_FILE_PATH!r}")
14
15 # %%
16 # A first step: looking at our data
17 # ----------------------------------
18 #
19 # Let's quickly plot this file:
20 from nilearn import plotting
21
22 plotting.plot_img(MNI152_FILE_PATH)
23
24 # %%
25 # This is not a very pretty plot. We just used the simplest possible
26 # code. There is a whole :ref:`section of the documentation <plotting>`
27 # on making prettier code.
28 #
29 # **Exercise**: Try plotting one of your own files. In the above,
30 # MNI152_FILE_PATH is nothing more than a string with a path pointing to
31 # a nifti image. You can replace it with a string pointing to a file on
32 # your disk. Note that it should be a 3D volume, and not a 4D volume.
33
34 # %%
35 # Simple image manipulation: smoothing
36 # ------------------------------------
37 #
38 # Let's use an image-smoothing function from nilearn:
39 # :func:`nilearn.image.smooth_img`
40 #
41 # Functions containing 'img' can take either a filename or an image as input.
42 #
43 # Here we give as inputs the image filename and the smoothing value in mm
44 from nilearn import image
45
46 smooth_anat_img = image.smooth_img(MNI152_FILE_PATH, fwhm=3)
47
48 # While we are giving a file name as input, the function returns
49 # an in-memory object:
50 smooth_anat_img
51
52 # %%
53 # This is an in-memory object. We can pass it to nilearn function, for
54 # instance to look at it
55 plotting.plot_img(smooth_anat_img)
56
57 # %%
58 # We could also pass it to the smoothing function
59 more_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)
60 plotting.plot_img(more_smooth_anat_img)
61
62 # %%
63 # Saving results to a file
64 # -------------------------
65 #
66 # We can save any in-memory object as follows:
67 from pathlib import Path
68
69 output_dir = Path.cwd() / "results" / "plot_nilearn_101"
70 output_dir.mkdir(exist_ok=True, parents=True)
71 print(f"Output will be saved to: {output_dir}")
72 more_smooth_anat_img.to_filename(output_dir / "more_smooth_anat_img.nii.gz")
73
74 # %%
75 # Finally, calling plotting.show() is necessary to display the figure
76 # when running as a script outside IPython
77 plotting.show()
78
79 # %%
80 # |
81 #
82 # ______
83 #
84 # To recap, all the nilearn tools can take data as filenames or in-memory
85 # objects, and return brain volumes as in-memory objects. These can be
86 # passed on to other nilearn tools, or saved to disk.
87
88 # sphinx_gallery_dummy_images=1
89
[end of examples/00_tutorials/plot_nilearn_101.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/00_tutorials/plot_nilearn_101.py b/examples/00_tutorials/plot_nilearn_101.py
--- a/examples/00_tutorials/plot_nilearn_101.py
+++ b/examples/00_tutorials/plot_nilearn_101.py
@@ -59,17 +59,49 @@
more_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)
plotting.plot_img(more_smooth_anat_img)
+
+# %%
+# Globbing over multiple 3D volumes
+# ---------------------------------
+# Nilearn also supports reading multiple volumes at once,
+# using glob-style patterns.
+# For instance, we can smooth volumes from many subjects
+# at once and get a 4D image as output.
+
+# %%
+# First let's fetch Haxby dataset for subject 1 and 2
+from nilearn import datasets
+
+haxby = datasets.fetch_haxby(subjects=[1, 2])
+
+# %%
+# Now we can find the anatomical images from both
+# subjects using the `*` wildcard
+from pathlib import Path
+
+anats_all_subjects = (
+ Path(datasets.get_data_dirs()[0]) / "haxby2001" / "subj*" / "anat*"
+)
+
+# %%
+# Now we can smooth all the anatomical images at once
+anats_all_subjects_smooth = image.smooth_img(anats_all_subjects, fwhm=5)
+
+# %%
+# This is a 4D image containing one volume per subject
+print(anats_all_subjects_smooth.shape)
+
# %%
# Saving results to a file
# -------------------------
#
# We can save any in-memory object as follows:
-from pathlib import Path
-
output_dir = Path.cwd() / "results" / "plot_nilearn_101"
output_dir.mkdir(exist_ok=True, parents=True)
print(f"Output will be saved to: {output_dir}")
-more_smooth_anat_img.to_filename(output_dir / "more_smooth_anat_img.nii.gz")
+anats_all_subjects_smooth.to_filename(
+ output_dir / "anats_all_subjects_smooth.nii.gz"
+)
# %%
# Finally, calling plotting.show() is necessary to display the figure
@@ -81,8 +113,9 @@
#
# ______
#
-# To recap, all the nilearn tools can take data as filenames or in-memory
-# objects, and return brain volumes as in-memory objects. These can be
+# To recap, all the nilearn tools can take data as filenames or
+# glob-style patterns or in-memory objects, and return brain
+# volumes as in-memory objects. These can be
# passed on to other nilearn tools, or saved to disk.
# sphinx_gallery_dummy_images=1
| {"golden_diff": "diff --git a/examples/00_tutorials/plot_nilearn_101.py b/examples/00_tutorials/plot_nilearn_101.py\n--- a/examples/00_tutorials/plot_nilearn_101.py\n+++ b/examples/00_tutorials/plot_nilearn_101.py\n@@ -59,17 +59,49 @@\n more_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)\n plotting.plot_img(more_smooth_anat_img)\n \n+\n+# %%\n+# Globbing over multiple 3D volumes\n+# ---------------------------------\n+# Nilearn also supports reading multiple volumes at once,\n+# using glob-style patterns.\n+# For instance, we can smooth volumes from many subjects\n+# at once and get a 4D image as output.\n+\n+# %%\n+# First let's fetch Haxby dataset for subject 1 and 2\n+from nilearn import datasets\n+\n+haxby = datasets.fetch_haxby(subjects=[1, 2])\n+\n+# %%\n+# Now we can find the anatomical images from both\n+# subjects using the `*` wildcard\n+from pathlib import Path\n+\n+anats_all_subjects = (\n+ Path(datasets.get_data_dirs()[0]) / \"haxby2001\" / \"subj*\" / \"anat*\"\n+)\n+\n+# %%\n+# Now we can smooth all the anatomical images at once\n+anats_all_subjects_smooth = image.smooth_img(anats_all_subjects, fwhm=5)\n+\n+# %%\n+# This is a 4D image containing one volume per subject\n+print(anats_all_subjects_smooth.shape)\n+\n # %%\n # Saving results to a file\n # -------------------------\n #\n # We can save any in-memory object as follows:\n-from pathlib import Path\n-\n output_dir = Path.cwd() / \"results\" / \"plot_nilearn_101\"\n output_dir.mkdir(exist_ok=True, parents=True)\n print(f\"Output will be saved to: {output_dir}\")\n-more_smooth_anat_img.to_filename(output_dir / \"more_smooth_anat_img.nii.gz\")\n+anats_all_subjects_smooth.to_filename(\n+ output_dir / \"anats_all_subjects_smooth.nii.gz\"\n+)\n \n # %%\n # Finally, calling plotting.show() is necessary to display the figure\n@@ -81,8 +113,9 @@\n #\n # ______\n #\n-# To recap, all the nilearn tools can take data as filenames or in-memory\n-# objects, and return brain volumes as in-memory objects. These can be\n+# To recap, all the nilearn tools can take data as filenames or\n+# glob-style patterns or in-memory objects, and return brain\n+# volumes as in-memory objects. These can be\n # passed on to other nilearn tools, or saved to disk.\n \n # sphinx_gallery_dummy_images=1\n", "issue": "Globbing should be advertised in doc/examples.\n\n", "before_files": [{"content": "\"\"\"\nBasic nilearn example: manipulating and looking at data\n=======================================================\n\nA simple example showing how to load an existing Nifti file and use\nbasic nilearn functionalities.\n\"\"\"\n\n# Let us use a Nifti file that is shipped with nilearn\nfrom nilearn.datasets import MNI152_FILE_PATH\n\n# Note that the variable MNI152_FILE_PATH is just a path to a Nifti file\nprint(f\"Path to MNI152 template: {MNI152_FILE_PATH!r}\")\n\n# %%\n# A first step: looking at our data\n# ----------------------------------\n#\n# Let's quickly plot this file:\nfrom nilearn import plotting\n\nplotting.plot_img(MNI152_FILE_PATH)\n\n# %%\n# This is not a very pretty plot. We just used the simplest possible\n# code. There is a whole :ref:`section of the documentation <plotting>`\n# on making prettier code.\n#\n# **Exercise**: Try plotting one of your own files. In the above,\n# MNI152_FILE_PATH is nothing more than a string with a path pointing to\n# a nifti image. You can replace it with a string pointing to a file on\n# your disk. 
Note that it should be a 3D volume, and not a 4D volume.\n\n# %%\n# Simple image manipulation: smoothing\n# ------------------------------------\n#\n# Let's use an image-smoothing function from nilearn:\n# :func:`nilearn.image.smooth_img`\n#\n# Functions containing 'img' can take either a filename or an image as input.\n#\n# Here we give as inputs the image filename and the smoothing value in mm\nfrom nilearn import image\n\nsmooth_anat_img = image.smooth_img(MNI152_FILE_PATH, fwhm=3)\n\n# While we are giving a file name as input, the function returns\n# an in-memory object:\nsmooth_anat_img\n\n# %%\n# This is an in-memory object. We can pass it to nilearn function, for\n# instance to look at it\nplotting.plot_img(smooth_anat_img)\n\n# %%\n# We could also pass it to the smoothing function\nmore_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)\nplotting.plot_img(more_smooth_anat_img)\n\n# %%\n# Saving results to a file\n# -------------------------\n#\n# We can save any in-memory object as follows:\nfrom pathlib import Path\n\noutput_dir = Path.cwd() / \"results\" / \"plot_nilearn_101\"\noutput_dir.mkdir(exist_ok=True, parents=True)\nprint(f\"Output will be saved to: {output_dir}\")\nmore_smooth_anat_img.to_filename(output_dir / \"more_smooth_anat_img.nii.gz\")\n\n# %%\n# Finally, calling plotting.show() is necessary to display the figure\n# when running as a script outside IPython\nplotting.show()\n\n# %%\n# |\n#\n# ______\n#\n# To recap, all the nilearn tools can take data as filenames or in-memory\n# objects, and return brain volumes as in-memory objects. These can be\n# passed on to other nilearn tools, or saved to disk.\n\n# sphinx_gallery_dummy_images=1\n", "path": "examples/00_tutorials/plot_nilearn_101.py"}]} | 1,435 | 626 |
gh_patches_debug_3966 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Headers aren't always case insensitive
Headers like `Content-Length` should preserve their case after going through mitmproxy, because some versions of PHP don't like lowercase headers (this is a server-side misimplementation of [RFC 2616](https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), but it would be nice if mitmproxy were able to handle it).
Steps to reproduce:
Capture some traffic, replay it with `mitmdump -nc capture_file`. Everything works as intended.
Now replay it, but use a script that replaces the request content. The `Content-Length` header gets changed to `content-length`.
The problem probably lies at [netlib/http/message.py](https://github.com/mitmproxy/mitmproxy/blob/master/netlib/http/message.py#L104)
Mitmproxy Version: latest
</issue>
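(Editorial note, not part of the original report.) A minimal sketch of the kind of replay script the reproduction step refers to: it only needs to touch the request body so that mitmproxy re-serializes the `Content-Length` header. The hook signature below follows the addon-style API and is an assumption; the mitmproxy version in the report may use a different script signature:

```python
# replace_body.py, a hypothetical repro helper (name and signature are assumptions)
def request(flow):
    # Re-assigning the body makes mitmproxy recompute Content-Length,
    # which is where the lower-cased header name shows up per the report.
    flow.request.content = b"replaced request body"
```

Run with something like `mitmdump -nc capture_file -s replace_body.py` (flag spelling may differ between versions).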
<code>
[start of netlib/multidict.py]
1 from __future__ import absolute_import, print_function, division
2
3 from abc import ABCMeta, abstractmethod
4
5
6 try:
7 from collections.abc import MutableMapping
8 except ImportError: # pragma: no cover
9 from collections import MutableMapping # Workaround for Python < 3.3
10
11 import six
12 from netlib import basetypes
13
14
15 @six.add_metaclass(ABCMeta)
16 class _MultiDict(MutableMapping, basetypes.Serializable):
17 def __repr__(self):
18 fields = (
19 repr(field)
20 for field in self.fields
21 )
22 return "{cls}[{fields}]".format(
23 cls=type(self).__name__,
24 fields=", ".join(fields)
25 )
26
27 @staticmethod
28 @abstractmethod
29 def _reduce_values(values):
30 """
31 If a user accesses multidict["foo"], this method
32 reduces all values for "foo" to a single value that is returned.
33 For example, HTTP headers are folded, whereas we will just take
34 the first cookie we found with that name.
35 """
36
37 @staticmethod
38 @abstractmethod
39 def _kconv(key):
40 """
41 This method converts a key to its canonical representation.
42 For example, HTTP headers are case-insensitive, so this method returns key.lower().
43 """
44
45 def __getitem__(self, key):
46 values = self.get_all(key)
47 if not values:
48 raise KeyError(key)
49 return self._reduce_values(values)
50
51 def __setitem__(self, key, value):
52 self.set_all(key, [value])
53
54 def __delitem__(self, key):
55 if key not in self:
56 raise KeyError(key)
57 key = self._kconv(key)
58 self.fields = tuple(
59 field for field in self.fields
60 if key != self._kconv(field[0])
61 )
62
63 def __iter__(self):
64 seen = set()
65 for key, _ in self.fields:
66 key_kconv = self._kconv(key)
67 if key_kconv not in seen:
68 seen.add(key_kconv)
69 yield key
70
71 def __len__(self):
72 return len(set(self._kconv(key) for key, _ in self.fields))
73
74 def __eq__(self, other):
75 if isinstance(other, MultiDict):
76 return self.fields == other.fields
77 return False
78
79 def __ne__(self, other):
80 return not self.__eq__(other)
81
82 def __hash__(self):
83 return hash(self.fields)
84
85 def get_all(self, key):
86 """
87 Return the list of all values for a given key.
88 If that key is not in the MultiDict, the return value will be an empty list.
89 """
90 key = self._kconv(key)
91 return [
92 value
93 for k, value in self.fields
94 if self._kconv(k) == key
95 ]
96
97 def set_all(self, key, values):
98 """
99 Remove the old values for a key and add new ones.
100 """
101 key_kconv = self._kconv(key)
102
103 new_fields = []
104 for field in self.fields:
105 if self._kconv(field[0]) == key_kconv:
106 if values:
107 new_fields.append(
108 (key, values.pop(0))
109 )
110 else:
111 new_fields.append(field)
112 while values:
113 new_fields.append(
114 (key, values.pop(0))
115 )
116 self.fields = tuple(new_fields)
117
118 def add(self, key, value):
119 """
120 Add an additional value for the given key at the bottom.
121 """
122 self.insert(len(self.fields), key, value)
123
124 def insert(self, index, key, value):
125 """
126 Insert an additional value for the given key at the specified position.
127 """
128 item = (key, value)
129 self.fields = self.fields[:index] + (item,) + self.fields[index:]
130
131 def keys(self, multi=False):
132 """
133 Get all keys.
134
135 Args:
136 multi(bool):
137 If True, one key per value will be returned.
138 If False, duplicate keys will only be returned once.
139 """
140 return (
141 k
142 for k, _ in self.items(multi)
143 )
144
145 def values(self, multi=False):
146 """
147 Get all values.
148
149 Args:
150 multi(bool):
151 If True, all values will be returned.
152 If False, only the first value per key will be returned.
153 """
154 return (
155 v
156 for _, v in self.items(multi)
157 )
158
159 def items(self, multi=False):
160 """
161 Get all (key, value) tuples.
162
163 Args:
164 multi(bool):
165 If True, all (key, value) pairs will be returned
166 If False, only the first (key, value) pair per unique key will be returned.
167 """
168 if multi:
169 return self.fields
170 else:
171 return super(_MultiDict, self).items()
172
173 def clear(self, key):
174 """
175 Removes all items with the specified key, and does not raise an
176 exception if the key does not exist.
177 """
178 if key in self:
179 del self[key]
180
181 def collect(self):
182 """
183 Returns a list of (key, value) tuples, where values are either
184 singular if threre is only one matching item for a key, or a list
185 if there are more than one. The order of the keys matches the order
186 in the underlying fields list.
187 """
188 coll = []
189 for key in self:
190 values = self.get_all(key)
191 if len(values) == 1:
192 coll.append([key, values[0]])
193 else:
194 coll.append([key, values])
195 return coll
196
197 def to_dict(self):
198 """
199 Get the MultiDict as a plain Python dict.
200 Keys with multiple values are returned as lists.
201
202 Example:
203
204 .. code-block:: python
205
206 # Simple dict with duplicate values.
207 >>> d
208 MultiDictView[("name", "value"), ("a", "false"), ("a", "42")]
209 >>> d.to_dict()
210 {
211 "name": "value",
212 "a": ["false", "42"]
213 }
214 """
215 d = {}
216 for k, v in self.collect():
217 d[k] = v
218 return d
219
220 def get_state(self):
221 return self.fields
222
223 def set_state(self, state):
224 self.fields = tuple(tuple(x) for x in state)
225
226 @classmethod
227 def from_state(cls, state):
228 return cls(state)
229
230
231 class MultiDict(_MultiDict):
232 def __init__(self, fields=()):
233 super(MultiDict, self).__init__()
234 self.fields = tuple(
235 tuple(i) for i in fields
236 )
237
238
239 @six.add_metaclass(ABCMeta)
240 class ImmutableMultiDict(MultiDict):
241 def _immutable(self, *_):
242 raise TypeError('{} objects are immutable'.format(self.__class__.__name__))
243
244 __delitem__ = set_all = insert = _immutable
245
246 def with_delitem(self, key):
247 """
248 Returns:
249 An updated ImmutableMultiDict. The original object will not be modified.
250 """
251 ret = self.copy()
252 super(ImmutableMultiDict, ret).__delitem__(key)
253 return ret
254
255 def with_set_all(self, key, values):
256 """
257 Returns:
258 An updated ImmutableMultiDict. The original object will not be modified.
259 """
260 ret = self.copy()
261 super(ImmutableMultiDict, ret).set_all(key, values)
262 return ret
263
264 def with_insert(self, index, key, value):
265 """
266 Returns:
267 An updated ImmutableMultiDict. The original object will not be modified.
268 """
269 ret = self.copy()
270 super(ImmutableMultiDict, ret).insert(index, key, value)
271 return ret
272
273
274 class MultiDictView(_MultiDict):
275 """
276 The MultiDictView provides the MultiDict interface over calculated data.
277 The view itself contains no state - data is retrieved from the parent on
278 request, and stored back to the parent on change.
279 """
280 def __init__(self, getter, setter):
281 self._getter = getter
282 self._setter = setter
283 super(MultiDictView, self).__init__()
284
285 @staticmethod
286 def _kconv(key):
287 # All request-attributes are case-sensitive.
288 return key
289
290 @staticmethod
291 def _reduce_values(values):
292 # We just return the first element if
293 # multiple elements exist with the same key.
294 return values[0]
295
296 @property
297 def fields(self):
298 return self._getter()
299
300 @fields.setter
301 def fields(self, value):
302 return self._setter(value)
303
[end of netlib/multidict.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netlib/multidict.py b/netlib/multidict.py
--- a/netlib/multidict.py
+++ b/netlib/multidict.py
@@ -105,7 +105,7 @@
if self._kconv(field[0]) == key_kconv:
if values:
new_fields.append(
- (key, values.pop(0))
+ (field[0], values.pop(0))
)
else:
new_fields.append(field)
| {"golden_diff": "diff --git a/netlib/multidict.py b/netlib/multidict.py\n--- a/netlib/multidict.py\n+++ b/netlib/multidict.py\n@@ -105,7 +105,7 @@\n if self._kconv(field[0]) == key_kconv:\n if values:\n new_fields.append(\n- (key, values.pop(0))\n+ (field[0], values.pop(0))\n )\n else:\n new_fields.append(field)\n", "issue": "Headers aren't always case insensitive\nHeaders like `Content-Length` should preserve case after going trough mitmproxy, because some versions of PHP don't like lowercase headers (this is a server-side misimplementation of [RFC 2616](https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), but it would be nice if mitmproxy was able to handle this.). \n\nSteps to reproduce:\nCapture some traffic, replay it with `mitmdump -nc capture_file`. Everything works as intended.\nNow replay it, but use a script that replaces the request content. The `Content-Length` header get's changed to `content-lenght`. \n\nThe problem probably lies at [netlib/http/message.py](https://github.com/mitmproxy/mitmproxy/blob/master/netlib/http/message.py#L104)\nMitmproxy Version: latest\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, division\n\nfrom abc import ABCMeta, abstractmethod\n\n\ntry:\n from collections.abc import MutableMapping\nexcept ImportError: # pragma: no cover\n from collections import MutableMapping # Workaround for Python < 3.3\n\nimport six\nfrom netlib import basetypes\n\n\[email protected]_metaclass(ABCMeta)\nclass _MultiDict(MutableMapping, basetypes.Serializable):\n def __repr__(self):\n fields = (\n repr(field)\n for field in self.fields\n )\n return \"{cls}[{fields}]\".format(\n cls=type(self).__name__,\n fields=\", \".join(fields)\n )\n\n @staticmethod\n @abstractmethod\n def _reduce_values(values):\n \"\"\"\n If a user accesses multidict[\"foo\"], this method\n reduces all values for \"foo\" to a single value that is returned.\n For example, HTTP headers are folded, whereas we will just take\n the first cookie we found with that name.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def _kconv(key):\n \"\"\"\n This method converts a key to its canonical representation.\n For example, HTTP headers are case-insensitive, so this method returns key.lower().\n \"\"\"\n\n def __getitem__(self, key):\n values = self.get_all(key)\n if not values:\n raise KeyError(key)\n return self._reduce_values(values)\n\n def __setitem__(self, key, value):\n self.set_all(key, [value])\n\n def __delitem__(self, key):\n if key not in self:\n raise KeyError(key)\n key = self._kconv(key)\n self.fields = tuple(\n field for field in self.fields\n if key != self._kconv(field[0])\n )\n\n def __iter__(self):\n seen = set()\n for key, _ in self.fields:\n key_kconv = self._kconv(key)\n if key_kconv not in seen:\n seen.add(key_kconv)\n yield key\n\n def __len__(self):\n return len(set(self._kconv(key) for key, _ in self.fields))\n\n def __eq__(self, other):\n if isinstance(other, MultiDict):\n return self.fields == other.fields\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.fields)\n\n def get_all(self, key):\n \"\"\"\n Return the list of all values for a given key.\n If that key is not in the MultiDict, the return value will be an empty list.\n \"\"\"\n key = self._kconv(key)\n return [\n value\n for k, value in self.fields\n if self._kconv(k) == key\n ]\n\n def set_all(self, key, values):\n \"\"\"\n Remove the old values for a key and add new ones.\n \"\"\"\n key_kconv = 
self._kconv(key)\n\n new_fields = []\n for field in self.fields:\n if self._kconv(field[0]) == key_kconv:\n if values:\n new_fields.append(\n (key, values.pop(0))\n )\n else:\n new_fields.append(field)\n while values:\n new_fields.append(\n (key, values.pop(0))\n )\n self.fields = tuple(new_fields)\n\n def add(self, key, value):\n \"\"\"\n Add an additional value for the given key at the bottom.\n \"\"\"\n self.insert(len(self.fields), key, value)\n\n def insert(self, index, key, value):\n \"\"\"\n Insert an additional value for the given key at the specified position.\n \"\"\"\n item = (key, value)\n self.fields = self.fields[:index] + (item,) + self.fields[index:]\n\n def keys(self, multi=False):\n \"\"\"\n Get all keys.\n\n Args:\n multi(bool):\n If True, one key per value will be returned.\n If False, duplicate keys will only be returned once.\n \"\"\"\n return (\n k\n for k, _ in self.items(multi)\n )\n\n def values(self, multi=False):\n \"\"\"\n Get all values.\n\n Args:\n multi(bool):\n If True, all values will be returned.\n If False, only the first value per key will be returned.\n \"\"\"\n return (\n v\n for _, v in self.items(multi)\n )\n\n def items(self, multi=False):\n \"\"\"\n Get all (key, value) tuples.\n\n Args:\n multi(bool):\n If True, all (key, value) pairs will be returned\n If False, only the first (key, value) pair per unique key will be returned.\n \"\"\"\n if multi:\n return self.fields\n else:\n return super(_MultiDict, self).items()\n\n def clear(self, key):\n \"\"\"\n Removes all items with the specified key, and does not raise an\n exception if the key does not exist.\n \"\"\"\n if key in self:\n del self[key]\n\n def collect(self):\n \"\"\"\n Returns a list of (key, value) tuples, where values are either\n singular if threre is only one matching item for a key, or a list\n if there are more than one. The order of the keys matches the order\n in the underlying fields list.\n \"\"\"\n coll = []\n for key in self:\n values = self.get_all(key)\n if len(values) == 1:\n coll.append([key, values[0]])\n else:\n coll.append([key, values])\n return coll\n\n def to_dict(self):\n \"\"\"\n Get the MultiDict as a plain Python dict.\n Keys with multiple values are returned as lists.\n\n Example:\n\n .. code-block:: python\n\n # Simple dict with duplicate values.\n >>> d\n MultiDictView[(\"name\", \"value\"), (\"a\", \"false\"), (\"a\", \"42\")]\n >>> d.to_dict()\n {\n \"name\": \"value\",\n \"a\": [\"false\", \"42\"]\n }\n \"\"\"\n d = {}\n for k, v in self.collect():\n d[k] = v\n return d\n\n def get_state(self):\n return self.fields\n\n def set_state(self, state):\n self.fields = tuple(tuple(x) for x in state)\n\n @classmethod\n def from_state(cls, state):\n return cls(state)\n\n\nclass MultiDict(_MultiDict):\n def __init__(self, fields=()):\n super(MultiDict, self).__init__()\n self.fields = tuple(\n tuple(i) for i in fields\n )\n\n\[email protected]_metaclass(ABCMeta)\nclass ImmutableMultiDict(MultiDict):\n def _immutable(self, *_):\n raise TypeError('{} objects are immutable'.format(self.__class__.__name__))\n\n __delitem__ = set_all = insert = _immutable\n\n def with_delitem(self, key):\n \"\"\"\n Returns:\n An updated ImmutableMultiDict. The original object will not be modified.\n \"\"\"\n ret = self.copy()\n super(ImmutableMultiDict, ret).__delitem__(key)\n return ret\n\n def with_set_all(self, key, values):\n \"\"\"\n Returns:\n An updated ImmutableMultiDict. 
The original object will not be modified.\n \"\"\"\n ret = self.copy()\n super(ImmutableMultiDict, ret).set_all(key, values)\n return ret\n\n def with_insert(self, index, key, value):\n \"\"\"\n Returns:\n An updated ImmutableMultiDict. The original object will not be modified.\n \"\"\"\n ret = self.copy()\n super(ImmutableMultiDict, ret).insert(index, key, value)\n return ret\n\n\nclass MultiDictView(_MultiDict):\n \"\"\"\n The MultiDictView provides the MultiDict interface over calculated data.\n The view itself contains no state - data is retrieved from the parent on\n request, and stored back to the parent on change.\n \"\"\"\n def __init__(self, getter, setter):\n self._getter = getter\n self._setter = setter\n super(MultiDictView, self).__init__()\n\n @staticmethod\n def _kconv(key):\n # All request-attributes are case-sensitive.\n return key\n\n @staticmethod\n def _reduce_values(values):\n # We just return the first element if\n # multiple elements exist with the same key.\n return values[0]\n\n @property\n def fields(self):\n return self._getter()\n\n @fields.setter\n def fields(self, value):\n return self._setter(value)\n", "path": "netlib/multidict.py"}]} | 3,417 | 111 |
gh_patches_debug_30588 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1652 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[META 690] Ensure a minimum retry interval of 5 seconds in fetching central configuration
See meta issue for the description and details:
- Meta issue: https://github.com/elastic/apm/issues/690
</issue>
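(Editorial illustration, not taken from the meta issue.) The requested behaviour amounts to clamping the central-config refresh interval derived from the server's `Cache-Control: max-age` response header so that it is never shorter than 5 seconds. A rough sketch of that rule, reusing the 300-second default already present in `get_config` below:

```python
DEFAULT_MAX_AGE = 300  # existing fallback when no usable Cache-Control value is present
MIN_MAX_AGE = 5        # minimum retry interval the change asks for

def effective_max_age(cc_max_age):
    """Return the refresh interval to use, given a parsed max-age value (or None)."""
    if cc_max_age is None or cc_max_age <= 0:
        return DEFAULT_MAX_AGE
    return max(cc_max_age, MIN_MAX_AGE)

assert effective_max_age(None) == 300   # header missing or unparsable
assert effective_max_age(2) == 5        # too aggressive, clamped up to 5 seconds
assert effective_max_age(30) == 30      # sane values pass through unchanged
```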
<code>
[start of elasticapm/transport/http.py]
1 # -*- coding: utf-8 -*-
2
3 # BSD 3-Clause License
4 #
5 # Copyright (c) 2019, Elasticsearch BV
6 # All rights reserved.
7 #
8 # Redistribution and use in source and binary forms, with or without
9 # modification, are permitted provided that the following conditions are met:
10 #
11 # * Redistributions of source code must retain the above copyright notice, this
12 # list of conditions and the following disclaimer.
13 #
14 # * Redistributions in binary form must reproduce the above copyright notice,
15 # this list of conditions and the following disclaimer in the documentation
16 # and/or other materials provided with the distribution.
17 #
18 # * Neither the name of the copyright holder nor the names of its
19 # contributors may be used to endorse or promote products derived from
20 # this software without specific prior written permission.
21 #
22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32
33 import hashlib
34 import json
35 import re
36 import ssl
37 import urllib.parse
38 from urllib.request import getproxies_environment, proxy_bypass_environment
39
40 import urllib3
41 from urllib3.exceptions import MaxRetryError, TimeoutError
42
43 from elasticapm.transport.exceptions import TransportException
44 from elasticapm.transport.http_base import HTTPTransportBase
45 from elasticapm.utils import json_encoder, read_pem_file
46 from elasticapm.utils.logging import get_logger
47
48 try:
49 import certifi
50 except ImportError:
51 certifi = None
52
53 logger = get_logger("elasticapm.transport.http")
54
55
56 class Transport(HTTPTransportBase):
57 def __init__(self, url: str, *args, **kwargs) -> None:
58 super(Transport, self).__init__(url, *args, **kwargs)
59 pool_kwargs = {"cert_reqs": "CERT_REQUIRED", "ca_certs": self.ca_certs, "block": True}
60 if url.startswith("https"):
61 if self._server_cert:
62 pool_kwargs.update(
63 {"assert_fingerprint": self.cert_fingerprint, "assert_hostname": False, "cert_reqs": ssl.CERT_NONE}
64 )
65 del pool_kwargs["ca_certs"]
66 elif not self._verify_server_cert:
67 pool_kwargs["cert_reqs"] = ssl.CERT_NONE
68 pool_kwargs["assert_hostname"] = False
69 self._pool_kwargs = pool_kwargs
70 self._http = None
71 self._url = url
72
73 def send(self, data, forced_flush=False):
74 response = None
75
76 headers = self._headers.copy() if self._headers else {}
77 headers.update(self.auth_headers)
78 headers.update(
79 {
80 b"Content-Type": b"application/x-ndjson",
81 b"Content-Encoding": b"gzip",
82 }
83 )
84
85 url = self._url
86 if forced_flush:
87 url = f"{url}?flushed=true"
88 try:
89 try:
90 response = self.http.urlopen(
91 "POST", url, body=data, headers=headers, timeout=self._timeout, preload_content=False
92 )
93 logger.debug("Sent request, url=%s size=%.2fkb status=%s", url, len(data) / 1024.0, response.status)
94 except Exception as e:
95 print_trace = True
96 if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):
97 message = "Connection to APM Server timed out " "(url: %s, timeout: %s seconds)" % (
98 self._url,
99 self._timeout,
100 )
101 print_trace = False
102 else:
103 message = "Unable to reach APM Server: %s (url: %s)" % (e, self._url)
104 raise TransportException(message, data, print_trace=print_trace)
105 body = response.read()
106 if response.status >= 400:
107 if response.status == 429: # rate-limited
108 message = "Temporarily rate limited: "
109 print_trace = False
110 else:
111 message = "HTTP %s: " % response.status
112 print_trace = True
113 message += body.decode("utf8", errors="replace")[:10000]
114 raise TransportException(message, data, print_trace=print_trace)
115 return response.getheader("Location")
116 finally:
117 if response:
118 response.close()
119
120 @property
121 def http(self) -> urllib3.PoolManager:
122 if not self._http:
123 url_parts = urllib.parse.urlparse(self._url)
124 proxies = getproxies_environment()
125 proxy_url = proxies.get("https", proxies.get("http", None))
126 if proxy_url and not proxy_bypass_environment(url_parts.netloc):
127 self._http = urllib3.ProxyManager(proxy_url, **self._pool_kwargs)
128 else:
129 self._http = urllib3.PoolManager(**self._pool_kwargs)
130 return self._http
131
132 def handle_fork(self) -> None:
133 # reset http pool to avoid sharing connections with the parent process
134 self._http = None
135
136 def get_config(self, current_version=None, keys=None):
137 """
138 Gets configuration from a remote APM Server
139
140 :param current_version: version of the current configuration
141 :param keys: a JSON-serializable dict to identify this instance, e.g.
142 {
143 "service": {
144 "name": "foo",
145 "environment": "bar"
146 }
147 }
148 :return: a three-tuple of new version, config dictionary and validity in seconds.
149 Any element of the tuple can be None.
150 """
151 url = self._config_url
152 data = json_encoder.dumps(keys).encode("utf-8")
153 headers = self._headers.copy()
154 headers[b"Content-Type"] = "application/json"
155 headers.update(self.auth_headers)
156 max_age = 300
157 if current_version:
158 headers["If-None-Match"] = current_version
159 try:
160 response = self.http.urlopen(
161 "POST", url, body=data, headers=headers, timeout=self._timeout, preload_content=False
162 )
163 except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:
164 logger.debug("HTTP error while fetching remote config: %s", str(e))
165 return current_version, None, max_age
166 body = response.read()
167 if "Cache-Control" in response.headers:
168 try:
169 max_age = int(next(re.finditer(r"max-age=(\d+)", response.headers["Cache-Control"])).groups()[0])
170 except StopIteration:
171 logger.debug("Could not parse Cache-Control header: %s", response.headers["Cache-Control"])
172 if response.status == 304:
173 # config is unchanged, return
174 logger.debug("Configuration unchanged")
175 return current_version, None, max_age
176 elif response.status >= 400:
177 return None, None, max_age
178
179 if not body:
180 logger.debug("APM Server answered with empty body and status code %s", response.status)
181 return current_version, None, max_age
182 body = body.decode("utf-8")
183 try:
184 data = json_encoder.loads(body)
185 return response.headers.get("Etag"), data, max_age
186 except json.JSONDecodeError:
187 logger.warning("Failed decoding APM Server response as JSON: %s", body)
188 return current_version, None, max_age
189
190 def _process_queue(self):
191 if not self.client.server_version:
192 self.fetch_server_info()
193 super()._process_queue()
194
195 def fetch_server_info(self):
196 headers = self._headers.copy() if self._headers else {}
197 headers.update(self.auth_headers)
198 headers[b"accept"] = b"text/plain"
199 try:
200 response = self.http.urlopen("GET", self._server_info_url, headers=headers, timeout=self._timeout)
201 body = response.data
202 data = json_encoder.loads(body.decode("utf8"))
203 version = data["version"]
204 logger.debug("Fetched APM Server version %s", version)
205 self.client.server_version = version_string_to_tuple(version)
206 except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:
207 logger.warning("HTTP error while fetching server information: %s", str(e))
208 except json.JSONDecodeError as e:
209 logger.warning("JSON decoding error while fetching server information: %s", str(e))
210 except (KeyError, TypeError):
211 logger.warning("No version key found in server response: %s", response.data)
212
213 @property
214 def cert_fingerprint(self):
215 if self._server_cert:
216 with open(self._server_cert, "rb") as f:
217 cert_data = read_pem_file(f)
218 digest = hashlib.sha256()
219 digest.update(cert_data)
220 return digest.hexdigest()
221 return None
222
223 @property
224 def auth_headers(self):
225 headers = super(Transport, self).auth_headers
226 return {k.encode("ascii"): v.encode("ascii") for k, v in headers.items()}
227
228 @property
229 def ca_certs(self):
230 """
231 Return location of certificate store. If it is available and not disabled via setting,
232 this will return the location of the certifi certificate store.
233 """
234 return certifi.where() if (certifi and self.client.config.use_certifi) else None
235
236
237 def version_string_to_tuple(version):
238 if version:
239 version_parts = re.split(r"[.\-]", version)
240 return tuple(int(p) if p.isdigit() else p for p in version_parts)
241 return ()
242
243
244 # left for backwards compatibility
245 AsyncTransport = Transport
246
[end of elasticapm/transport/http.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/transport/http.py b/elasticapm/transport/http.py
--- a/elasticapm/transport/http.py
+++ b/elasticapm/transport/http.py
@@ -164,11 +164,9 @@
logger.debug("HTTP error while fetching remote config: %s", str(e))
return current_version, None, max_age
body = response.read()
- if "Cache-Control" in response.headers:
- try:
- max_age = int(next(re.finditer(r"max-age=(\d+)", response.headers["Cache-Control"])).groups()[0])
- except StopIteration:
- logger.debug("Could not parse Cache-Control header: %s", response.headers["Cache-Control"])
+
+ max_age = self._get_cache_control_max_age(response.headers) or max_age
+
if response.status == 304:
# config is unchanged, return
logger.debug("Configuration unchanged")
@@ -187,6 +185,22 @@
logger.warning("Failed decoding APM Server response as JSON: %s", body)
return current_version, None, max_age
+ def _get_cache_control_max_age(self, response_headers):
+ max_age = None
+ if "Cache-Control" in response_headers:
+ try:
+ cc_max_age = int(next(re.finditer(r"max-age=(\d+)", response_headers["Cache-Control"])).groups()[0])
+ if cc_max_age <= 0:
+ # max_age remains at default value
+ pass
+ elif cc_max_age < 5:
+ max_age = 5
+ else:
+ max_age = cc_max_age
+ except StopIteration:
+ logger.debug("Could not parse Cache-Control header: %s", response_headers["Cache-Control"])
+ return max_age
+
def _process_queue(self):
if not self.client.server_version:
self.fetch_server_info()
| {"golden_diff": "diff --git a/elasticapm/transport/http.py b/elasticapm/transport/http.py\n--- a/elasticapm/transport/http.py\n+++ b/elasticapm/transport/http.py\n@@ -164,11 +164,9 @@\n logger.debug(\"HTTP error while fetching remote config: %s\", str(e))\n return current_version, None, max_age\n body = response.read()\n- if \"Cache-Control\" in response.headers:\n- try:\n- max_age = int(next(re.finditer(r\"max-age=(\\d+)\", response.headers[\"Cache-Control\"])).groups()[0])\n- except StopIteration:\n- logger.debug(\"Could not parse Cache-Control header: %s\", response.headers[\"Cache-Control\"])\n+\n+ max_age = self._get_cache_control_max_age(response.headers) or max_age\n+\n if response.status == 304:\n # config is unchanged, return\n logger.debug(\"Configuration unchanged\")\n@@ -187,6 +185,22 @@\n logger.warning(\"Failed decoding APM Server response as JSON: %s\", body)\n return current_version, None, max_age\n \n+ def _get_cache_control_max_age(self, response_headers):\n+ max_age = None\n+ if \"Cache-Control\" in response_headers:\n+ try:\n+ cc_max_age = int(next(re.finditer(r\"max-age=(\\d+)\", response_headers[\"Cache-Control\"])).groups()[0])\n+ if cc_max_age <= 0:\n+ # max_age remains at default value\n+ pass\n+ elif cc_max_age < 5:\n+ max_age = 5\n+ else:\n+ max_age = cc_max_age\n+ except StopIteration:\n+ logger.debug(\"Could not parse Cache-Control header: %s\", response_headers[\"Cache-Control\"])\n+ return max_age\n+\n def _process_queue(self):\n if not self.client.server_version:\n self.fetch_server_info()\n", "issue": "[META 690] Ensure a minimum retry interval of 5 seconds in fetching central configuration\nSee meta issue for the description and details:\r\n- Meta issue: https://github.com/elastic/apm/issues/690\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport hashlib\nimport json\nimport re\nimport ssl\nimport urllib.parse\nfrom urllib.request import getproxies_environment, proxy_bypass_environment\n\nimport urllib3\nfrom urllib3.exceptions import MaxRetryError, TimeoutError\n\nfrom elasticapm.transport.exceptions import TransportException\nfrom elasticapm.transport.http_base import HTTPTransportBase\nfrom elasticapm.utils import json_encoder, read_pem_file\nfrom elasticapm.utils.logging import get_logger\n\ntry:\n import certifi\nexcept ImportError:\n certifi = None\n\nlogger = get_logger(\"elasticapm.transport.http\")\n\n\nclass Transport(HTTPTransportBase):\n def __init__(self, url: str, *args, **kwargs) -> None:\n super(Transport, self).__init__(url, *args, **kwargs)\n pool_kwargs = {\"cert_reqs\": \"CERT_REQUIRED\", \"ca_certs\": self.ca_certs, \"block\": True}\n if url.startswith(\"https\"):\n if self._server_cert:\n pool_kwargs.update(\n {\"assert_fingerprint\": self.cert_fingerprint, \"assert_hostname\": False, \"cert_reqs\": ssl.CERT_NONE}\n )\n del pool_kwargs[\"ca_certs\"]\n elif not self._verify_server_cert:\n pool_kwargs[\"cert_reqs\"] = ssl.CERT_NONE\n pool_kwargs[\"assert_hostname\"] = False\n self._pool_kwargs = pool_kwargs\n self._http = None\n self._url = url\n\n def send(self, data, forced_flush=False):\n response = None\n\n headers = self._headers.copy() if self._headers else {}\n headers.update(self.auth_headers)\n headers.update(\n {\n b\"Content-Type\": b\"application/x-ndjson\",\n b\"Content-Encoding\": b\"gzip\",\n }\n )\n\n url = self._url\n if forced_flush:\n url = f\"{url}?flushed=true\"\n try:\n try:\n response = self.http.urlopen(\n \"POST\", url, body=data, headers=headers, timeout=self._timeout, preload_content=False\n )\n logger.debug(\"Sent request, url=%s size=%.2fkb status=%s\", url, len(data) / 1024.0, response.status)\n except Exception as e:\n print_trace = True\n if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):\n message = \"Connection to APM Server timed out \" \"(url: %s, timeout: %s seconds)\" % (\n self._url,\n self._timeout,\n )\n print_trace = False\n else:\n message = \"Unable to reach APM Server: %s (url: %s)\" % (e, self._url)\n raise TransportException(message, data, print_trace=print_trace)\n body = response.read()\n if response.status >= 400:\n if response.status == 429: # rate-limited\n message = \"Temporarily rate limited: \"\n print_trace = False\n else:\n message = \"HTTP %s: \" % response.status\n print_trace = True\n message += body.decode(\"utf8\", errors=\"replace\")[:10000]\n raise TransportException(message, data, print_trace=print_trace)\n return response.getheader(\"Location\")\n finally:\n if response:\n response.close()\n\n @property\n def http(self) -> urllib3.PoolManager:\n if not self._http:\n url_parts = urllib.parse.urlparse(self._url)\n proxies = getproxies_environment()\n proxy_url = proxies.get(\"https\", proxies.get(\"http\", None))\n if proxy_url and not proxy_bypass_environment(url_parts.netloc):\n self._http = 
urllib3.ProxyManager(proxy_url, **self._pool_kwargs)\n else:\n self._http = urllib3.PoolManager(**self._pool_kwargs)\n return self._http\n\n def handle_fork(self) -> None:\n # reset http pool to avoid sharing connections with the parent process\n self._http = None\n\n def get_config(self, current_version=None, keys=None):\n \"\"\"\n Gets configuration from a remote APM Server\n\n :param current_version: version of the current configuration\n :param keys: a JSON-serializable dict to identify this instance, e.g.\n {\n \"service\": {\n \"name\": \"foo\",\n \"environment\": \"bar\"\n }\n }\n :return: a three-tuple of new version, config dictionary and validity in seconds.\n Any element of the tuple can be None.\n \"\"\"\n url = self._config_url\n data = json_encoder.dumps(keys).encode(\"utf-8\")\n headers = self._headers.copy()\n headers[b\"Content-Type\"] = \"application/json\"\n headers.update(self.auth_headers)\n max_age = 300\n if current_version:\n headers[\"If-None-Match\"] = current_version\n try:\n response = self.http.urlopen(\n \"POST\", url, body=data, headers=headers, timeout=self._timeout, preload_content=False\n )\n except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:\n logger.debug(\"HTTP error while fetching remote config: %s\", str(e))\n return current_version, None, max_age\n body = response.read()\n if \"Cache-Control\" in response.headers:\n try:\n max_age = int(next(re.finditer(r\"max-age=(\\d+)\", response.headers[\"Cache-Control\"])).groups()[0])\n except StopIteration:\n logger.debug(\"Could not parse Cache-Control header: %s\", response.headers[\"Cache-Control\"])\n if response.status == 304:\n # config is unchanged, return\n logger.debug(\"Configuration unchanged\")\n return current_version, None, max_age\n elif response.status >= 400:\n return None, None, max_age\n\n if not body:\n logger.debug(\"APM Server answered with empty body and status code %s\", response.status)\n return current_version, None, max_age\n body = body.decode(\"utf-8\")\n try:\n data = json_encoder.loads(body)\n return response.headers.get(\"Etag\"), data, max_age\n except json.JSONDecodeError:\n logger.warning(\"Failed decoding APM Server response as JSON: %s\", body)\n return current_version, None, max_age\n\n def _process_queue(self):\n if not self.client.server_version:\n self.fetch_server_info()\n super()._process_queue()\n\n def fetch_server_info(self):\n headers = self._headers.copy() if self._headers else {}\n headers.update(self.auth_headers)\n headers[b\"accept\"] = b\"text/plain\"\n try:\n response = self.http.urlopen(\"GET\", self._server_info_url, headers=headers, timeout=self._timeout)\n body = response.data\n data = json_encoder.loads(body.decode(\"utf8\"))\n version = data[\"version\"]\n logger.debug(\"Fetched APM Server version %s\", version)\n self.client.server_version = version_string_to_tuple(version)\n except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:\n logger.warning(\"HTTP error while fetching server information: %s\", str(e))\n except json.JSONDecodeError as e:\n logger.warning(\"JSON decoding error while fetching server information: %s\", str(e))\n except (KeyError, TypeError):\n logger.warning(\"No version key found in server response: %s\", response.data)\n\n @property\n def cert_fingerprint(self):\n if self._server_cert:\n with open(self._server_cert, \"rb\") as f:\n cert_data = read_pem_file(f)\n digest = hashlib.sha256()\n digest.update(cert_data)\n return digest.hexdigest()\n return None\n\n @property\n def 
auth_headers(self):\n headers = super(Transport, self).auth_headers\n return {k.encode(\"ascii\"): v.encode(\"ascii\") for k, v in headers.items()}\n\n @property\n def ca_certs(self):\n \"\"\"\n Return location of certificate store. If it is available and not disabled via setting,\n this will return the location of the certifi certificate store.\n \"\"\"\n return certifi.where() if (certifi and self.client.config.use_certifi) else None\n\n\ndef version_string_to_tuple(version):\n if version:\n version_parts = re.split(r\"[.\\-]\", version)\n return tuple(int(p) if p.isdigit() else p for p in version_parts)\n return ()\n\n\n# left for backwards compatibility\nAsyncTransport = Transport\n", "path": "elasticapm/transport/http.py"}]} | 3,408 | 430 |
gh_patches_debug_16643 | rasdani/github-patches | git_diff | sktime__sktime-5330 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] `temporal_train_test_split` does not work on panel datatypes with unequal length series.
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
Also relates to #4968
`temporal_train_test_split` wrongly split panel datatypes (splitting per unique unequal time series). It could be that the split function does not support this type yet, If so, it should throw an error msg telling that it does not currently supported the datatypes.
**To Reproduce**
<!--
Add a Minimal, Complete, and Verifiable example (for more details, see e.g. https://stackoverflow.com/help/mcve
If the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com
-->
```python
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.utils._testing.panel import _make_panel
y = _make_panel(n_instances=2, n_timepoints=6)
# make first instance series shorter than the second
y.iloc[4:6] =None
y.dropna(inplace=True)
train_size, test_size = temporal_train_test_split(y, test_size=2)
# show shapes
print(
f"""
{y.shape=}
{train_size.shape=}, {test_size.shape=} #train size should be (6,1) 2+4=6
"""
)
# has also the same issue as #4968 below is the minimal example
train_fh, test_fh = temporal_train_test_split(y, fh=[1,2])
# show shapes
print(
f"""
{y.shape=}
{train_fh.shape=}, {test_fh.shape=} #train size should be (6,1) and test (4,1)
"""
)
```
output
```
y.shape=(10, 1)
train_size.shape=(4, 1), test_size.shape=(4, 1) #train size should be 6 (2+4)
y.shape=(10, 1)
train_fh.shape=(8, 1), test_fh.shape=(2, 1) #train size should be (6,1) and test (4,1)
```
**Versions**
<details>
<!--
Please run the following code snippet and paste the output here:
from sktime import show_versions; show_versions()
-->
main at [3cf69ed](https://github.com/sktime/sktime/commit/3cf69eddba315d6130b661ca5fe8e132e236aa47)
</details>
<!-- Thanks for contributing! -->
</issue>
<code>
[start of sktime/split/__init__.py]
1 """Module for splitters."""
2
3 __all__ = [
4 "CutoffSplitter",
5 "ExpandingGreedySplitter",
6 "ExpandingWindowSplitter",
7 "SameLocSplitter",
8 "SingleWindowSplitter",
9 "SlidingWindowSplitter",
10 "TestPlusTrainSplitter",
11 "temporal_train_test_split",
12 ]
13
14 from sktime.split.cutoff import CutoffSplitter
15 from sktime.split.expandinggreedy import ExpandingGreedySplitter
16 from sktime.split.expandingwindow import ExpandingWindowSplitter
17 from sktime.split.sameloc import SameLocSplitter
18 from sktime.split.singlewindow import SingleWindowSplitter
19 from sktime.split.slidingwindow import SlidingWindowSplitter
20 from sktime.split.temporal_train_test_split import temporal_train_test_split
21 from sktime.split.testplustrain import TestPlusTrainSplitter
22
[end of sktime/split/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/split/__init__.py b/sktime/split/__init__.py
--- a/sktime/split/__init__.py
+++ b/sktime/split/__init__.py
@@ -7,6 +7,7 @@
"SameLocSplitter",
"SingleWindowSplitter",
"SlidingWindowSplitter",
+ "TemporalTrainTestSplitter",
"TestPlusTrainSplitter",
"temporal_train_test_split",
]
@@ -17,5 +18,8 @@
from sktime.split.sameloc import SameLocSplitter
from sktime.split.singlewindow import SingleWindowSplitter
from sktime.split.slidingwindow import SlidingWindowSplitter
-from sktime.split.temporal_train_test_split import temporal_train_test_split
+from sktime.split.temporal_train_test_split import (
+ TemporalTrainTestSplitter,
+ temporal_train_test_split,
+)
from sktime.split.testplustrain import TestPlusTrainSplitter
| {"golden_diff": "diff --git a/sktime/split/__init__.py b/sktime/split/__init__.py\n--- a/sktime/split/__init__.py\n+++ b/sktime/split/__init__.py\n@@ -7,6 +7,7 @@\n \"SameLocSplitter\",\n \"SingleWindowSplitter\",\n \"SlidingWindowSplitter\",\n+ \"TemporalTrainTestSplitter\",\n \"TestPlusTrainSplitter\",\n \"temporal_train_test_split\",\n ]\n@@ -17,5 +18,8 @@\n from sktime.split.sameloc import SameLocSplitter\n from sktime.split.singlewindow import SingleWindowSplitter\n from sktime.split.slidingwindow import SlidingWindowSplitter\n-from sktime.split.temporal_train_test_split import temporal_train_test_split\n+from sktime.split.temporal_train_test_split import (\n+ TemporalTrainTestSplitter,\n+ temporal_train_test_split,\n+)\n from sktime.split.testplustrain import TestPlusTrainSplitter\n", "issue": "[BUG] `temporal_train_test_split` does not work on panel datatypes with unequal length series.\n**Describe the bug**\r\n<!--\r\nA clear and concise description of what the bug is.\r\n-->\r\nAlso relates to #4968 \r\n\r\n`temporal_train_test_split` wrongly split panel datatypes (splitting per unique unequal time series). It could be that the split function does not support this type yet, If so, it should throw an error msg telling that it does not currently supported the datatypes.\r\n\r\n**To Reproduce**\r\n<!--\r\nAdd a Minimal, Complete, and Verifiable example (for more details, see e.g. https://stackoverflow.com/help/mcve\r\n\r\nIf the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com\r\n-->\r\n\r\n```python\r\nfrom sktime.forecasting.model_selection import temporal_train_test_split\r\nfrom sktime.utils._testing.panel import _make_panel\r\n\r\ny = _make_panel(n_instances=2, n_timepoints=6)\r\n# make first instance series shorter than the second\r\ny.iloc[4:6] =None\r\ny.dropna(inplace=True)\r\n\r\ntrain_size, test_size = temporal_train_test_split(y, test_size=2) \r\n# show shapes\r\nprint(\r\n f\"\"\"\r\n {y.shape=} \r\n {train_size.shape=}, {test_size.shape=} #train size should be (6,1) 2+4=6\r\n \"\"\"\r\n)\r\n\r\n# has also the same issue as #4968 below is the minimal example\r\ntrain_fh, test_fh = temporal_train_test_split(y, fh=[1,2]) \r\n# show shapes\r\nprint(\r\n f\"\"\"\r\n {y.shape=} \r\n {train_fh.shape=}, {test_fh.shape=} #train size should be (6,1) and test (4,1)\r\n \"\"\"\r\n)\r\n```\r\n\r\noutput\r\n```\r\ny.shape=(10, 1) \r\ntrain_size.shape=(4, 1), test_size.shape=(4, 1) #train size should be 6 (2+4)\r\n\r\ny.shape=(10, 1) \r\ntrain_fh.shape=(8, 1), test_fh.shape=(2, 1) #train size should be (6,1) and test (4,1)\r\n```\r\n\r\n**Versions**\r\n<details>\r\n\r\n<!--\r\nPlease run the following code snippet and paste the output here:\r\n\r\nfrom sktime import show_versions; show_versions()\r\n-->\r\n main at [3cf69ed](https://github.com/sktime/sktime/commit/3cf69eddba315d6130b661ca5fe8e132e236aa47)\r\n\r\n</details>\r\n\r\n<!-- Thanks for contributing! 
-->\r\n\n", "before_files": [{"content": "\"\"\"Module for splitters.\"\"\"\n\n__all__ = [\n \"CutoffSplitter\",\n \"ExpandingGreedySplitter\",\n \"ExpandingWindowSplitter\",\n \"SameLocSplitter\",\n \"SingleWindowSplitter\",\n \"SlidingWindowSplitter\",\n \"TestPlusTrainSplitter\",\n \"temporal_train_test_split\",\n]\n\nfrom sktime.split.cutoff import CutoffSplitter\nfrom sktime.split.expandinggreedy import ExpandingGreedySplitter\nfrom sktime.split.expandingwindow import ExpandingWindowSplitter\nfrom sktime.split.sameloc import SameLocSplitter\nfrom sktime.split.singlewindow import SingleWindowSplitter\nfrom sktime.split.slidingwindow import SlidingWindowSplitter\nfrom sktime.split.temporal_train_test_split import temporal_train_test_split\nfrom sktime.split.testplustrain import TestPlusTrainSplitter\n", "path": "sktime/split/__init__.py"}]} | 1,347 | 217 |
gh_patches_debug_28195 | rasdani/github-patches | git_diff | pretix__pretix-2399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Regression: File question required when editing ticket
This is the same issue as #1408, I'm not quite sure whether this should be a new bug because it's a regression or if the old bug should be reopened?
Long story short: We noticed in Pretix 4.3 that customers editing their order will be faced with file questions being `required` even though they have uploaded a file for that question already. The requirement comes from the `input` being marked as `required` and the browsers refusing to submit the form without something in there.
This was fixed in #1443, but it seems this doesn't work anymore. I haven't had time to look into this more, so I don't have a clue where it might fail.
Regression: File question required when editing ticket
This is the same issue as #1408, I'm not quite sure whether this should be a new bug because it's a regression or if the old bug should be reopened?
Long story short: We noticed in Pretix 4.3 that customers editing their order will be faced with file questions being `required` even though they have uploaded a file for that question already. The requirement comes from the `input` being marked as `required` and the browsers refusing to submit the form without something in there.
This was fixed in #1443, but it seems this doesn't work anymore. I haven't had time to look into this more, so I don't have a clue where it might fail.
</issue>
<code>
[start of src/pretix/base/forms/widgets.py]
1 #
2 # This file is part of pretix (Community Edition).
3 #
4 # Copyright (C) 2014-2020 Raphael Michel and contributors
5 # Copyright (C) 2020-2021 rami.io GmbH and contributors
6 #
7 # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
8 # Public License as published by the Free Software Foundation in version 3 of the License.
9 #
10 # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
11 # applicable granting you additional permissions and placing additional restrictions on your usage of this software.
12 # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
13 # this file, see <https://pretix.eu/about/en/license>.
14 #
15 # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
16 # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
17 # details.
18 #
19 # You should have received a copy of the GNU Affero General Public License along with this program. If not, see
20 # <https://www.gnu.org/licenses/>.
21 #
22
23 # This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
24 # the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
25 #
26 # This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
27 # full history of changes and contributors is available at <https://github.com/pretix/pretix>.
28 #
29 # This file contains Apache-licensed contributions copyrighted by: Felix Schäfer
30 #
31 # Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
32 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
33 # License for the specific language governing permissions and limitations under the License.
34
35 import os
36 from datetime import date
37
38 from django import forms
39 from django.utils.formats import get_format
40 from django.utils.functional import lazy
41 from django.utils.timezone import get_current_timezone, now
42 from django.utils.translation import gettext_lazy as _
43
44
45 class DatePickerWidget(forms.DateInput):
46 def __init__(self, attrs=None, date_format=None):
47 attrs = attrs or {}
48 if 'placeholder' in attrs:
49 del attrs['placeholder']
50 date_attrs = dict(attrs)
51 date_attrs.setdefault('class', 'form-control')
52 date_attrs['class'] += ' datepickerfield'
53 date_attrs['autocomplete'] = 'off'
54
55 def placeholder():
56 df = date_format or get_format('DATE_INPUT_FORMATS')[0]
57 return now().replace(
58 year=2000, month=12, day=31, hour=18, minute=0, second=0, microsecond=0
59 ).strftime(df)
60
61 date_attrs['placeholder'] = lazy(placeholder, str)
62
63 forms.DateInput.__init__(self, date_attrs, date_format)
64
65
66 class TimePickerWidget(forms.TimeInput):
67 def __init__(self, attrs=None, time_format=None):
68 attrs = attrs or {}
69 if 'placeholder' in attrs:
70 del attrs['placeholder']
71 time_attrs = dict(attrs)
72 time_attrs.setdefault('class', 'form-control')
73 time_attrs['class'] += ' timepickerfield'
74 time_attrs['autocomplete'] = 'off'
75
76 def placeholder():
77 tf = time_format or get_format('TIME_INPUT_FORMATS')[0]
78 return now().replace(
79 year=2000, month=1, day=1, hour=0, minute=0, second=0, microsecond=0
80 ).strftime(tf)
81
82 time_attrs['placeholder'] = lazy(placeholder, str)
83
84 forms.TimeInput.__init__(self, time_attrs, time_format)
85
86
87 class UploadedFileWidget(forms.ClearableFileInput):
88 def __init__(self, *args, **kwargs):
89 # Browsers can't recognize that the server already has a file uploaded
90 # Don't mark this input as being required if we already have an answer
91 # (this needs to be done via the attrs, otherwise we wouldn't get the "required" star on the field label)
92 attrs = kwargs.get('attrs', {})
93 if kwargs.get('required') and kwargs.get('initial'):
94 attrs.update({'required': None})
95 kwargs.update({'attrs': attrs})
96
97 self.position = kwargs.pop('position')
98 self.event = kwargs.pop('event')
99 self.answer = kwargs.pop('answer')
100 super().__init__(*args, **kwargs)
101
102 class FakeFile:
103 def __init__(self, file, position, event, answer):
104 self.file = file
105 self.position = position
106 self.event = event
107 self.answer = answer
108
109 def __str__(self):
110 return os.path.basename(self.file.name).split('.', 1)[-1]
111
112 @property
113 def url(self):
114 from pretix.base.models import OrderPosition
115 from pretix.multidomain.urlreverse import eventreverse
116
117 if isinstance(self.position, OrderPosition):
118 return eventreverse(self.event, 'presale:event.order.download.answer', kwargs={
119 'order': self.position.order.code,
120 'secret': self.position.order.secret,
121 'answer': self.answer.pk,
122 })
123 else:
124 return eventreverse(self.event, 'presale:event.cart.download.answer', kwargs={
125 'answer': self.answer.pk,
126 })
127
128 def format_value(self, value):
129 if self.is_initial(value):
130 return self.FakeFile(value, self.position, self.event, self.answer)
131
132
133 class SplitDateTimePickerWidget(forms.SplitDateTimeWidget):
134 template_name = 'pretixbase/forms/widgets/splitdatetime.html'
135
136 def __init__(self, attrs=None, date_format=None, time_format=None, min_date=None, max_date=None):
137 attrs = attrs or {}
138 if 'placeholder' in attrs:
139 del attrs['placeholder']
140 date_attrs = dict(attrs)
141 time_attrs = dict(attrs)
142 date_attrs.setdefault('class', 'form-control splitdatetimepart')
143 time_attrs.setdefault('class', 'form-control splitdatetimepart')
144 date_attrs.setdefault('autocomplete', 'off')
145 time_attrs.setdefault('autocomplete', 'off')
146 date_attrs['class'] += ' datepickerfield'
147 time_attrs['class'] += ' timepickerfield'
148 date_attrs['autocomplete'] = 'off'
149 time_attrs['autocomplete'] = 'off'
150 if min_date:
151 date_attrs['data-min'] = (
152 min_date if isinstance(min_date, date) else min_date.astimezone(get_current_timezone()).date()
153 ).isoformat()
154 if max_date:
155 date_attrs['data-max'] = (
156 max_date if isinstance(max_date, date) else max_date.astimezone(get_current_timezone()).date()
157 ).isoformat()
158
159 def date_placeholder():
160 df = date_format or get_format('DATE_INPUT_FORMATS')[0]
161 return now().replace(
162 year=2000, month=12, day=31, hour=18, minute=0, second=0, microsecond=0
163 ).strftime(df)
164
165 def time_placeholder():
166 tf = time_format or get_format('TIME_INPUT_FORMATS')[0]
167 return now().replace(
168 year=2000, month=1, day=1, hour=0, minute=0, second=0, microsecond=0
169 ).strftime(tf)
170
171 date_attrs['placeholder'] = lazy(date_placeholder, str)
172 time_attrs['placeholder'] = lazy(time_placeholder, str)
173
174 widgets = (
175 forms.DateInput(attrs=date_attrs, format=date_format),
176 forms.TimeInput(attrs=time_attrs, format=time_format),
177 )
178 # Skip one hierarchy level
179 forms.MultiWidget.__init__(self, widgets, attrs)
180
181
182 class BusinessBooleanRadio(forms.RadioSelect):
183 def __init__(self, require_business=False, attrs=None):
184 self.require_business = require_business
185 if self.require_business:
186 choices = (
187 ('business', _('Business or institutional customer')),
188 )
189 else:
190 choices = (
191 ('individual', _('Individual customer')),
192 ('business', _('Business or institutional customer')),
193 )
194 super().__init__(attrs, choices)
195
196 def format_value(self, value):
197 if self.require_business:
198 return 'business'
199 try:
200 return {True: 'business', False: 'individual'}[value]
201 except KeyError:
202 return 'individual'
203
204 def value_from_datadict(self, data, files, name):
205 value = data.get(name)
206 if self.require_business:
207 return True
208 return {
209 'business': True,
210 True: True,
211 'True': True,
212 'individual': False,
213 'False': False,
214 False: False,
215 }.get(value)
216
[end of src/pretix/base/forms/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pretix/base/forms/widgets.py b/src/pretix/base/forms/widgets.py
--- a/src/pretix/base/forms/widgets.py
+++ b/src/pretix/base/forms/widgets.py
@@ -86,14 +86,6 @@
class UploadedFileWidget(forms.ClearableFileInput):
def __init__(self, *args, **kwargs):
- # Browsers can't recognize that the server already has a file uploaded
- # Don't mark this input as being required if we already have an answer
- # (this needs to be done via the attrs, otherwise we wouldn't get the "required" star on the field label)
- attrs = kwargs.get('attrs', {})
- if kwargs.get('required') and kwargs.get('initial'):
- attrs.update({'required': None})
- kwargs.update({'attrs': attrs})
-
self.position = kwargs.pop('position')
self.event = kwargs.pop('event')
self.answer = kwargs.pop('answer')
@@ -125,6 +117,15 @@
'answer': self.answer.pk,
})
+ def get_context(self, name, value, attrs):
+ # Browsers can't recognize that the server already has a file uploaded
+ # Don't mark this input as being required if we already have an answer
+ # (this needs to be done via the attrs, otherwise we wouldn't get the "required" star on the field label)
+ ctx = super().get_context(name, value, attrs)
+ if ctx['widget']['is_initial']:
+ ctx['widget']['attrs']['required'] = False
+ return ctx
+
def format_value(self, value):
if self.is_initial(value):
return self.FakeFile(value, self.position, self.event, self.answer)
| {"golden_diff": "diff --git a/src/pretix/base/forms/widgets.py b/src/pretix/base/forms/widgets.py\n--- a/src/pretix/base/forms/widgets.py\n+++ b/src/pretix/base/forms/widgets.py\n@@ -86,14 +86,6 @@\n \n class UploadedFileWidget(forms.ClearableFileInput):\n def __init__(self, *args, **kwargs):\n- # Browsers can't recognize that the server already has a file uploaded\n- # Don't mark this input as being required if we already have an answer\n- # (this needs to be done via the attrs, otherwise we wouldn't get the \"required\" star on the field label)\n- attrs = kwargs.get('attrs', {})\n- if kwargs.get('required') and kwargs.get('initial'):\n- attrs.update({'required': None})\n- kwargs.update({'attrs': attrs})\n-\n self.position = kwargs.pop('position')\n self.event = kwargs.pop('event')\n self.answer = kwargs.pop('answer')\n@@ -125,6 +117,15 @@\n 'answer': self.answer.pk,\n })\n \n+ def get_context(self, name, value, attrs):\n+ # Browsers can't recognize that the server already has a file uploaded\n+ # Don't mark this input as being required if we already have an answer\n+ # (this needs to be done via the attrs, otherwise we wouldn't get the \"required\" star on the field label)\n+ ctx = super().get_context(name, value, attrs)\n+ if ctx['widget']['is_initial']:\n+ ctx['widget']['attrs']['required'] = False\n+ return ctx\n+\n def format_value(self, value):\n if self.is_initial(value):\n return self.FakeFile(value, self.position, self.event, self.answer)\n", "issue": "Regression: File question required when editing ticket\nThis is the same issue as #1408, I'm not quite sure whether this should be a new bug because it's a regression or if the old bug should be reopened?\r\n\r\nLong story short: We noticed in Pretix 4.3 that customers editing their order will be faced with file questions being `required` even though they have uploaded a file for that question already. The requirement comes from the `input` being marked as `required` and the browsers refusing to submit the form without something in there.\r\n\r\nThis was fixed in #1443, but it seems this doesn't work anymore. I haven't had time to look into this more, so I don't have a clue where it might fail.\nRegression: File question required when editing ticket\nThis is the same issue as #1408, I'm not quite sure whether this should be a new bug because it's a regression or if the old bug should be reopened?\r\n\r\nLong story short: We noticed in Pretix 4.3 that customers editing their order will be faced with file questions being `required` even though they have uploaded a file for that question already. The requirement comes from the `input` being marked as `required` and the browsers refusing to submit the form without something in there.\r\n\r\nThis was fixed in #1443, but it seems this doesn't work anymore. 
I haven't had time to look into this more, so I don't have a clue where it might fail.\n", "before_files": [{"content": "#\n# This file is part of pretix (Community Edition).\n#\n# Copyright (C) 2014-2020 Raphael Michel and contributors\n# Copyright (C) 2020-2021 rami.io GmbH and contributors\n#\n# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General\n# Public License as published by the Free Software Foundation in version 3 of the License.\n#\n# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are\n# applicable granting you additional permissions and placing additional restrictions on your usage of this software.\n# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive\n# this file, see <https://pretix.eu/about/en/license>.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Affero General Public License along with this program. If not, see\n# <https://www.gnu.org/licenses/>.\n#\n\n# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of\n# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.\n#\n# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A\n# full history of changes and contributors is available at <https://github.com/pretix/pretix>.\n#\n# This file contains Apache-licensed contributions copyrighted by: Felix Sch\u00e4fer\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations under the License.\n\nimport os\nfrom datetime import date\n\nfrom django import forms\nfrom django.utils.formats import get_format\nfrom django.utils.functional import lazy\nfrom django.utils.timezone import get_current_timezone, now\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass DatePickerWidget(forms.DateInput):\n def __init__(self, attrs=None, date_format=None):\n attrs = attrs or {}\n if 'placeholder' in attrs:\n del attrs['placeholder']\n date_attrs = dict(attrs)\n date_attrs.setdefault('class', 'form-control')\n date_attrs['class'] += ' datepickerfield'\n date_attrs['autocomplete'] = 'off'\n\n def placeholder():\n df = date_format or get_format('DATE_INPUT_FORMATS')[0]\n return now().replace(\n year=2000, month=12, day=31, hour=18, minute=0, second=0, microsecond=0\n ).strftime(df)\n\n date_attrs['placeholder'] = lazy(placeholder, str)\n\n forms.DateInput.__init__(self, date_attrs, date_format)\n\n\nclass TimePickerWidget(forms.TimeInput):\n def __init__(self, attrs=None, time_format=None):\n attrs = attrs or {}\n if 'placeholder' in attrs:\n del attrs['placeholder']\n time_attrs = dict(attrs)\n time_attrs.setdefault('class', 'form-control')\n time_attrs['class'] += ' timepickerfield'\n time_attrs['autocomplete'] = 'off'\n\n def placeholder():\n tf = time_format or get_format('TIME_INPUT_FORMATS')[0]\n return now().replace(\n year=2000, month=1, day=1, hour=0, minute=0, second=0, microsecond=0\n ).strftime(tf)\n\n time_attrs['placeholder'] = lazy(placeholder, str)\n\n forms.TimeInput.__init__(self, time_attrs, time_format)\n\n\nclass UploadedFileWidget(forms.ClearableFileInput):\n def __init__(self, *args, **kwargs):\n # Browsers can't recognize that the server already has a file uploaded\n # Don't mark this input as being required if we already have an answer\n # (this needs to be done via the attrs, otherwise we wouldn't get the \"required\" star on the field label)\n attrs = kwargs.get('attrs', {})\n if kwargs.get('required') and kwargs.get('initial'):\n attrs.update({'required': None})\n kwargs.update({'attrs': attrs})\n\n self.position = kwargs.pop('position')\n self.event = kwargs.pop('event')\n self.answer = kwargs.pop('answer')\n super().__init__(*args, **kwargs)\n\n class FakeFile:\n def __init__(self, file, position, event, answer):\n self.file = file\n self.position = position\n self.event = event\n self.answer = answer\n\n def __str__(self):\n return os.path.basename(self.file.name).split('.', 1)[-1]\n\n @property\n def url(self):\n from pretix.base.models import OrderPosition\n from pretix.multidomain.urlreverse import eventreverse\n\n if isinstance(self.position, OrderPosition):\n return eventreverse(self.event, 'presale:event.order.download.answer', kwargs={\n 'order': self.position.order.code,\n 'secret': self.position.order.secret,\n 'answer': self.answer.pk,\n })\n else:\n return eventreverse(self.event, 'presale:event.cart.download.answer', kwargs={\n 'answer': self.answer.pk,\n })\n\n def format_value(self, value):\n if self.is_initial(value):\n return self.FakeFile(value, self.position, self.event, self.answer)\n\n\nclass SplitDateTimePickerWidget(forms.SplitDateTimeWidget):\n template_name = 'pretixbase/forms/widgets/splitdatetime.html'\n\n def __init__(self, attrs=None, date_format=None, time_format=None, min_date=None, max_date=None):\n attrs = attrs or {}\n if 'placeholder' in attrs:\n del attrs['placeholder']\n date_attrs = dict(attrs)\n time_attrs = 
dict(attrs)\n date_attrs.setdefault('class', 'form-control splitdatetimepart')\n time_attrs.setdefault('class', 'form-control splitdatetimepart')\n date_attrs.setdefault('autocomplete', 'off')\n time_attrs.setdefault('autocomplete', 'off')\n date_attrs['class'] += ' datepickerfield'\n time_attrs['class'] += ' timepickerfield'\n date_attrs['autocomplete'] = 'off'\n time_attrs['autocomplete'] = 'off'\n if min_date:\n date_attrs['data-min'] = (\n min_date if isinstance(min_date, date) else min_date.astimezone(get_current_timezone()).date()\n ).isoformat()\n if max_date:\n date_attrs['data-max'] = (\n max_date if isinstance(max_date, date) else max_date.astimezone(get_current_timezone()).date()\n ).isoformat()\n\n def date_placeholder():\n df = date_format or get_format('DATE_INPUT_FORMATS')[0]\n return now().replace(\n year=2000, month=12, day=31, hour=18, minute=0, second=0, microsecond=0\n ).strftime(df)\n\n def time_placeholder():\n tf = time_format or get_format('TIME_INPUT_FORMATS')[0]\n return now().replace(\n year=2000, month=1, day=1, hour=0, minute=0, second=0, microsecond=0\n ).strftime(tf)\n\n date_attrs['placeholder'] = lazy(date_placeholder, str)\n time_attrs['placeholder'] = lazy(time_placeholder, str)\n\n widgets = (\n forms.DateInput(attrs=date_attrs, format=date_format),\n forms.TimeInput(attrs=time_attrs, format=time_format),\n )\n # Skip one hierarchy level\n forms.MultiWidget.__init__(self, widgets, attrs)\n\n\nclass BusinessBooleanRadio(forms.RadioSelect):\n def __init__(self, require_business=False, attrs=None):\n self.require_business = require_business\n if self.require_business:\n choices = (\n ('business', _('Business or institutional customer')),\n )\n else:\n choices = (\n ('individual', _('Individual customer')),\n ('business', _('Business or institutional customer')),\n )\n super().__init__(attrs, choices)\n\n def format_value(self, value):\n if self.require_business:\n return 'business'\n try:\n return {True: 'business', False: 'individual'}[value]\n except KeyError:\n return 'individual'\n\n def value_from_datadict(self, data, files, name):\n value = data.get(name)\n if self.require_business:\n return True\n return {\n 'business': True,\n True: True,\n 'True': True,\n 'individual': False,\n 'False': False,\n False: False,\n }.get(value)\n", "path": "src/pretix/base/forms/widgets.py"}]} | 3,330 | 391 |
gh_patches_debug_15488 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aioredis raises CancelledError in _finish_span
### Which version of dd-trace-py are you using?
~~0.53.0~~ 0.58.0
### Which version of pip are you using?
21.3.1
### Which version of the libraries are you using?
django==3.2.11
django-redis==5.0.0
channels==3.0.4
daphne==3.0.2
### How can we reproduce your problem?
I am using code similar to the following:
asgi.py
```
import django
from channels.routing import get_default_application
from ddtrace.contrib.asgi import TraceMiddleware
django.setup()
application = TraceMiddleware(get_default_application())
```
routing.py
```
from django.urls import re_path
import my_app.consumers
websocket_urlpatterns = [
re_path(r"^ws/test/$", consumers.TestConsumer.as_asgi()),
]
```
my_app/consumers.py
```
from channels.generic.websocket import WebsocketConsumer
class TestConsumer(WebsocketConsumer):
groups = ["broadcast"]
def connect(self):
self.accept()
def receive(self, text_data=None, bytes_data=None):
raise Exception("An test exception")
```
I am running the application with: `ddtrace-run daphne asgi:application --bind 0.0.0.0 --port 8001`
### What is the result that you get?
I don't get any traces at all, and my logs show this:
```
handle: <Handle traced_13_execute_command.<locals>._finish_span(<Future cancelled>) at /usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py:140>
Traceback (most recent call last):
File "/usr/local/lib/python3.10/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py", line 146, in _finish_span
future.result()
asyncio.exceptions.CancelledError
```
### What is the result that you expected?
No errors
</issue>
<code>
[start of ddtrace/contrib/aioredis/patch.py]
1 import asyncio
2 import sys
3
4 import aioredis
5
6 from ddtrace import config
7 from ddtrace.internal.utils.wrappers import unwrap as _u
8 from ddtrace.pin import Pin
9 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
10
11 from .. import trace_utils
12 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
13 from ...constants import SPAN_MEASURED_KEY
14 from ...ext import SpanTypes
15 from ...ext import net
16 from ...ext import redis as redisx
17 from ...internal.utils.formats import stringify_cache_args
18 from ..redis.util import _trace_redis_cmd
19 from ..redis.util import _trace_redis_execute_pipeline
20
21
22 try:
23 from aioredis.commands.transaction import _RedisBuffer
24 except ImportError:
25 _RedisBuffer = None
26
27 config._add("aioredis", dict(_default_service="redis"))
28
29 aioredis_version_str = getattr(aioredis, "__version__", "0.0.0")
30 aioredis_version = tuple([int(i) for i in aioredis_version_str.split(".")])
31
32
33 def patch():
34 if getattr(aioredis, "_datadog_patch", False):
35 return
36 setattr(aioredis, "_datadog_patch", True)
37 pin = Pin()
38 if aioredis_version >= (2, 0):
39 _w("aioredis.client", "Redis.execute_command", traced_execute_command)
40 _w("aioredis.client", "Redis.pipeline", traced_pipeline)
41 _w("aioredis.client", "Pipeline.execute", traced_execute_pipeline)
42 pin.onto(aioredis.client.Redis)
43 else:
44 _w("aioredis", "Redis.execute", traced_13_execute_command)
45 _w("aioredis", "Redis.pipeline", traced_13_pipeline)
46 _w("aioredis.commands.transaction", "Pipeline.execute", traced_13_execute_pipeline)
47 pin.onto(aioredis.Redis)
48
49
50 def unpatch():
51 if not getattr(aioredis, "_datadog_patch", False):
52 return
53
54 setattr(aioredis, "_datadog_patch", False)
55 if aioredis_version >= (2, 0):
56 _u(aioredis.client.Redis, "execute_command")
57 _u(aioredis.client.Redis, "pipeline")
58 _u(aioredis.client.Pipeline, "execute")
59 else:
60 _u(aioredis.Redis, "execute")
61 _u(aioredis.Redis, "pipeline")
62 _u(aioredis.commands.transaction.Pipeline, "execute")
63
64
65 async def traced_execute_command(func, instance, args, kwargs):
66 pin = Pin.get_from(instance)
67 if not pin or not pin.enabled():
68 return await func(*args, **kwargs)
69
70 with _trace_redis_cmd(pin, config.aioredis, instance, args):
71 return await func(*args, **kwargs)
72
73
74 def traced_pipeline(func, instance, args, kwargs):
75 pipeline = func(*args, **kwargs)
76 pin = Pin.get_from(instance)
77 if pin:
78 pin.onto(pipeline)
79 return pipeline
80
81
82 async def traced_execute_pipeline(func, instance, args, kwargs):
83 pin = Pin.get_from(instance)
84 if not pin or not pin.enabled():
85 return await func(*args, **kwargs)
86
87 cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]
88 resource = "\n".join(cmds)
89 with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):
90 return await func(*args, **kwargs)
91
92
93 def traced_13_pipeline(func, instance, args, kwargs):
94 pipeline = func(*args, **kwargs)
95 pin = Pin.get_from(instance)
96 if pin:
97 pin.onto(pipeline)
98 return pipeline
99
100
101 def traced_13_execute_command(func, instance, args, kwargs):
102 # If we have a _RedisBuffer then we are in a pipeline
103 if isinstance(instance.connection, _RedisBuffer):
104 return func(*args, **kwargs)
105
106 pin = Pin.get_from(instance)
107 if not pin or not pin.enabled():
108 return func(*args, **kwargs)
109
110 # Don't activate the span since this operation is performed as a future which concludes sometime later on in
111 # execution so subsequent operations in the stack are not necessarily semantically related
112 # (we don't want this span to be the parent of all other spans created before the future is resolved)
113 parent = pin.tracer.current_span()
114 span = pin.tracer.start_span(
115 redisx.CMD,
116 service=trace_utils.ext_service(pin, config.aioredis),
117 span_type=SpanTypes.REDIS,
118 activate=False,
119 child_of=parent,
120 )
121
122 span.set_tag(SPAN_MEASURED_KEY)
123 query = stringify_cache_args(args)
124 span.resource = query
125 span.set_tag(redisx.RAWCMD, query)
126 if pin.tags:
127 span.set_tags(pin.tags)
128
129 span.set_tags(
130 {
131 net.TARGET_HOST: instance.address[0],
132 net.TARGET_PORT: instance.address[1],
133 redisx.DB: instance.db or 0,
134 }
135 )
136 span.set_metric(redisx.ARGS_LEN, len(args))
137 # set analytics sample rate if enabled
138 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
139
140 def _finish_span(future):
141 try:
142 # Accessing the result will raise an exception if:
143 # - The future was cancelled
144 # - There was an error executing the future (`future.exception()`)
145 # - The future is in an invalid state
146 future.result()
147 except Exception:
148 span.set_exc_info(*sys.exc_info())
149 finally:
150 span.finish()
151
152 task = func(*args, **kwargs)
153 # Execute command returns a coroutine when no free connections are available
154 # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191
155 task = asyncio.ensure_future(task)
156 task.add_done_callback(_finish_span)
157 return task
158
159
160 async def traced_13_execute_pipeline(func, instance, args, kwargs):
161 pin = Pin.get_from(instance)
162 if not pin or not pin.enabled():
163 return await func(*args, **kwargs)
164
165 cmds = []
166 for _, cmd, cmd_args, _ in instance._pipeline:
167 parts = [cmd]
168 parts.extend(cmd_args)
169 cmds.append(stringify_cache_args(parts))
170 resource = "\n".join(cmds)
171 with pin.tracer.trace(
172 redisx.CMD,
173 resource=resource,
174 service=trace_utils.ext_service(pin, config.aioredis),
175 span_type=SpanTypes.REDIS,
176 ) as span:
177
178 span.set_tags(
179 {
180 net.TARGET_HOST: instance._pool_or_conn.address[0],
181 net.TARGET_PORT: instance._pool_or_conn.address[1],
182 redisx.DB: instance._pool_or_conn.db or 0,
183 }
184 )
185
186 span.set_tag(SPAN_MEASURED_KEY)
187 span.set_tag(redisx.RAWCMD, resource)
188 span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))
189 # set analytics sample rate if enabled
190 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
191
192 return await func(*args, **kwargs)
193
[end of ddtrace/contrib/aioredis/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py
--- a/ddtrace/contrib/aioredis/patch.py
+++ b/ddtrace/contrib/aioredis/patch.py
@@ -140,11 +140,12 @@
def _finish_span(future):
try:
# Accessing the result will raise an exception if:
- # - The future was cancelled
+ # - The future was cancelled (CancelledError)
# - There was an error executing the future (`future.exception()`)
# - The future is in an invalid state
future.result()
- except Exception:
+ # CancelledError exceptions extend from BaseException as of Python 3.8, instead of usual Exception
+ except BaseException:
span.set_exc_info(*sys.exc_info())
finally:
span.finish()
| {"golden_diff": "diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py\n--- a/ddtrace/contrib/aioredis/patch.py\n+++ b/ddtrace/contrib/aioredis/patch.py\n@@ -140,11 +140,12 @@\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n- # - The future was cancelled\n+ # - The future was cancelled (CancelledError)\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n- except Exception:\n+ # CancelledError exceptions extend from BaseException as of Python 3.8, instead of usual Exception\n+ except BaseException:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n", "issue": "aioredis raises CancelledError in _finish_span \n### Which version of dd-trace-py are you using?\r\n\r\n~~0.53.0~~ 0.58.0\r\n\r\n### Which version of pip are you using?\r\n\r\n21.3.1\r\n\r\n### Which version of the libraries are you using?\r\n\r\ndjango==3.2.11\r\ndjango-redis==5.0.0\r\nchannels==3.0.4\r\ndaphne==3.0.2\r\n\r\n### How can we reproduce your problem?\r\n\r\nI am using code similar to the following:\r\n\r\nasgi.py\r\n\r\n```\r\nimport django\r\nfrom channels.routing import get_default_application\r\nfrom ddtrace.contrib.asgi import TraceMiddleware\r\n\r\ndjango.setup()\r\napplication = TraceMiddleware(get_default_application())\r\n```\r\n\r\nrouting.py\r\n\r\n```\r\nfrom django.urls import re_path\r\nimport my_app.consumers\r\n\r\nwebsocket_urlpatterns = [\r\n re_path(r\"^ws/test/$\", consumers.TestConsumer.as_asgi()),\r\n]\r\n```\r\n\r\nmy_app/consumers.py\r\n\r\n```\r\nfrom channels.generic.websocket import WebsocketConsumer\r\n\r\nclass TestConsumer(WebsocketConsumer):\r\n groups = [\"broadcast\"]\r\n\r\n def connect(self):\r\n self.accept()\r\n\r\n def receive(self, text_data=None, bytes_data=None):\r\n raise Exception(\"An test exception\")\r\n```\r\n\r\nI am running the application with: `ddtrace-run daphne asgi:application --bind 0.0.0.0 --port 8001`\r\n\r\n### What is the result that you get?\r\n\r\nI don't get any traces at all, and my logs show this:\r\n\r\n```\r\nhandle: <Handle traced_13_execute_command.<locals>._finish_span(<Future cancelled>) at /usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py:140>\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.10/asyncio/events.py\", line 80, in _run\r\n self._context.run(self._callback, *self._args)\r\n File \"/usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py\", line 146, in _finish_span\r\n future.result()\r\nasyncio.exceptions.CancelledError\r\n```\r\n\r\n\r\n### What is the result that you expected?\r\n\r\nNo errors\r\n\n", "before_files": [{"content": "import asyncio\nimport sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. 
import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ...internal.utils.formats import stringify_cache_args\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return await func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want this span to be the parent of all other spans created before the future is resolved)\n parent = pin.tracer.current_span()\n span = pin.tracer.start_span(\n redisx.CMD,\n service=trace_utils.ext_service(pin, 
config.aioredis),\n span_type=SpanTypes.REDIS,\n activate=False,\n child_of=parent,\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = stringify_cache_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n # Execute command returns a coroutine when no free connections are available\n # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191\n task = asyncio.ensure_future(task)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(stringify_cache_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py"}]} | 3,106 | 202 |
gh_patches_debug_47400 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-2132 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for Py 3.9
With Py 3.9 out, we should add it to the tests at some point. Maybe that can wait, until 3.9.x, x>0 is out, though
Need to check, if all the job thingies work out, as APS doesn't support py3.9 yet and there has been a [report](https://t.me/pythontelegrambotgroup/382731) that it doesn't work (with PTB).
On a related note: APS seems to be [preparing for v4.0](https://github.com/agronholm/apscheduler/issues/465), which will break some stuff, but also supports py3.9 and even uses the new ZoneInfo (also backporting to py3.6+), lifting the restriction to use `pytz` timezones. I already subscribed to releases. I guess updating APS in PTB should be done only when 4.x, x>0 is out and we're doing breaking things anyway …
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """The setup and build script for the python-telegram-bot library."""
3
4 import codecs
5 import os
6 import sys
7
8 from setuptools import setup, find_packages
9
10
11 def requirements():
12 """Build the requirements list for this project"""
13 requirements_list = []
14
15 with open('requirements.txt') as requirements:
16 for install in requirements:
17 requirements_list.append(install.strip())
18
19 return requirements_list
20
21
22 packages = find_packages(exclude=['tests*'])
23 requirements = requirements()
24
25 # Allow for a package install to not use the vendored urllib3
26 UPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3'
27 if UPSTREAM_URLLIB3_FLAG in sys.argv:
28 sys.argv.remove(UPSTREAM_URLLIB3_FLAG)
29 requirements.append('urllib3 >= 1.19.1')
30 packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')]
31
32 with codecs.open('README.rst', 'r', 'utf-8') as fd:
33 fn = os.path.join('telegram', 'version.py')
34 with open(fn) as fh:
35 code = compile(fh.read(), fn, 'exec')
36 exec(code)
37
38 setup(name='python-telegram-bot',
39 version=__version__,
40 author='Leandro Toledo',
41 author_email='[email protected]',
42 license='LGPLv3',
43 url='https://python-telegram-bot.org/',
44 keywords='python telegram bot api wrapper',
45 description="We have made you a wrapper you can't refuse",
46 long_description=fd.read(),
47 packages=packages,
48 install_requires=requirements,
49 extras_require={
50 'json': 'ujson',
51 'socks': 'PySocks'
52 },
53 include_package_data=True,
54 classifiers=[
55 'Development Status :: 5 - Production/Stable',
56 'Intended Audience :: Developers',
57 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
58 'Operating System :: OS Independent',
59 'Topic :: Software Development :: Libraries :: Python Modules',
60 'Topic :: Communications :: Chat',
61 'Topic :: Internet',
62 'Programming Language :: Python',
63 'Programming Language :: Python :: 3',
64 'Programming Language :: Python :: 3.6',
65 'Programming Language :: Python :: 3.7',
66 'Programming Language :: Python :: 3.8',
67 ],)
68
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -64,4 +64,5 @@
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
],)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -64,4 +64,5 @@\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n+ 'Programming Language :: Python :: 3.9',\n ],)\n", "issue": "Add support for Py 3.9\nWith Py 3.9 out, we should add it to the tests at some point. Maybe that can wait, until 3.9.x, x>0 is out, though\r\n\r\nNeed to check, if all the job thingies work out, as APS doesn't support py3.9 yet and there has been a [report](https://t.me/pythontelegrambotgroup/382731) that it doesn't work (with PTB).\r\n\r\nOn a related note: APS seems to be [preparing for v4.0](https://github.com/agronholm/apscheduler/issues/465), which will break some stuff, but also supports py3.9 and even uses the new ZoneInfo (also backporting to py3.6+), lifting the restriction to use `pytz` timezones. I already subscribed to releases. I guess updating APS in PTB should be done only when 4.x, x>0 is out and we're doing breaking things anyway \u2026\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\nrequirements = requirements()\n\n# Allow for a package install to not use the vendored urllib3\nUPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3'\nif UPSTREAM_URLLIB3_FLAG in sys.argv:\n sys.argv.remove(UPSTREAM_URLLIB3_FLAG)\n requirements.append('urllib3 >= 1.19.1')\n packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')]\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://python-telegram-bot.org/',\n keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n install_requires=requirements,\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],)\n", "path": "setup.py"}]} | 1,393 | 84 |
gh_patches_debug_15216 | rasdani/github-patches | git_diff | translate__translate-4027 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix properties roundtrip
After #3607 some changes need to be introduced in order to fix properties roundtrip. More details in https://github.com/translate/translate/pull/3607#issuecomment-291440437
</issue>
<code>
[start of translate/convert/po2prop.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2002-2006 Zuza Software Foundation
4 #
5 # This file is part of translate.
6 #
7 # translate is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # translate is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program; if not, see <http://www.gnu.org/licenses/>.
19
20 """Convert Gettext PO localization files to Java/Mozilla .properties files.
21
22 See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html
23 for examples and usage instructions.
24 """
25
26 import warnings
27
28 from translate.convert import accesskey, convert
29 from translate.misc import quote
30 from translate.storage import po, properties
31
32
33 eol = u"\n"
34
35
36 def applytranslation(key, propunit, inunit, mixedkeys):
37 """applies the translation for key in the po unit to the prop unit"""
38 # this converts the po-style string to a prop-style string
39 value = inunit.target
40 # handle mixed keys
41 for labelsuffix in properties.labelsuffixes:
42 if key.endswith(labelsuffix):
43 if key in mixedkeys:
44 value, akey = accesskey.extract(value)
45 break
46 else:
47 for akeysuffix in properties.accesskeysuffixes:
48 if key.endswith(akeysuffix):
49 if key in mixedkeys:
50 label, value = accesskey.extract(value)
51 if not value:
52 warnings.warn("Could not find accesskey for %s" % key)
53 # Use the source language accesskey
54 label, value = accesskey.extract(inunit.source)
55 else:
56 original = propunit.source
57 # For the sake of diffs we keep the case of the
58 # accesskey the same if we know the translation didn't
59 # change. Casing matters in XUL.
60 if value == propunit.source and original.lower() == value.lower():
61 if original.isupper():
62 value = value.upper()
63 elif original.islower():
64 value = value.lower()
65 return value
66
67
68 class reprop:
69
70 def __init__(self, templatefile, inputstore, personality, encoding=None,
71 remove_untranslated=False):
72 self.templatefile = templatefile
73 self.inputstore = inputstore
74 self.personality = properties.get_dialect(personality)
75 self.encoding = encoding
76 if self.encoding is None:
77 self.encoding = self.personality.default_encoding
78 self.remove_untranslated = remove_untranslated
79 self.mixer = accesskey.UnitMixer(properties.labelsuffixes,
80 properties.accesskeysuffixes)
81
82 def convertstore(self, includefuzzy=False):
83 self.includefuzzy = includefuzzy
84 self.inmultilinemsgid = False
85 self.inecho = False
86 self.inputstore.makeindex()
87 if self.personality.name == "gaia":
88 self._explode_gaia_plurals()
89 outputlines = []
90 # Readlines doesn't work for UTF-16, we read() and splitlines(keepends) instead
91 content = self.templatefile.read().decode(self.encoding)
92 for line in content.splitlines(True):
93 outputstr = self.convertline(line)
94 outputlines.append(outputstr)
95 return u"".join(outputlines).encode(self.encoding)
96
97 def _handle_accesskeys(self, inunit, currkey):
98 value = inunit.target
99 if self.personality.name == "mozilla":
100 keys = inunit.getlocations()
101 mixedkeys = self.mixer.match_entities(keys)
102 for key in keys:
103 if key == currkey and key in self.inputstore.locationindex:
104 propunit = self.inputstore.locationindex[key] # find the prop
105 value = applytranslation(key, propunit, inunit, mixedkeys)
106 break
107
108 return value
109
110 def _explode_gaia_plurals(self):
111 """Explode the gaia plurals."""
112 from translate.lang import data
113 for unit in self.inputstore.units:
114 if not unit.hasplural():
115 continue
116 if unit.isfuzzy() and not self.includefuzzy or not unit.istranslated():
117 continue
118
119 names = data.cldr_plural_categories
120 location = unit.getlocations()[0]
121 for category, text in zip(names, unit.target.strings):
122 # TODO: for now we assume all forms are present. We need to
123 # fill in the rest after mapping things to the proper CLDR names.
124 if category == 'zero':
125 # [zero] cases are translated as separate units
126 continue
127 new_unit = self.inputstore.addsourceunit(u"fish") # not used
128 new_location = '%s[%s]' % (location, category)
129 new_unit.addlocation(new_location)
130 new_unit.target = text
131 self.inputstore.locationindex[new_location] = new_unit
132
133 # We don't want the plural marker to be translated:
134 del self.inputstore.locationindex[location]
135
136 def convertline(self, line):
137 returnline = u""
138 # handle multiline msgid if we're in one
139 if self.inmultilinemsgid:
140 msgid = quote.rstripeol(line).strip()
141 # see if there's more
142 self.inmultilinemsgid = (msgid[-1:] == '\\')
143 # if we're echoing...
144 if self.inecho:
145 returnline = line
146 # otherwise, this could be a comment
147 elif line.strip()[:1] == '#':
148 returnline = quote.rstripeol(line) + eol
149 else:
150 line = quote.rstripeol(line)
151 delimiter_char, delimiter_pos = self.personality.find_delimiter(line)
152 if quote.rstripeol(line)[-1:] == '\\':
153 self.inmultilinemsgid = True
154 if delimiter_pos == -1:
155 key = self.personality.key_strip(line)
156 delimiter = " %s " % self.personality.delimiters[0]
157 else:
158 key = self.personality.key_strip(line[:delimiter_pos])
159 # Calculate space around the equal sign
160 prespace = line[line.find(' ', len(key)):delimiter_pos]
161 postspacestart = len(line[delimiter_pos+1:])
162 postspaceend = len(line[delimiter_pos+1:].lstrip())
163 postspace = line[delimiter_pos+1:delimiter_pos+(postspacestart-postspaceend)+1]
164 delimiter = prespace + delimiter_char + postspace
165 if key in self.inputstore.locationindex:
166 unit = self.inputstore.locationindex[key]
167 if unit is None or not unit.istranslated() and bool(unit.source) and self.remove_untranslated:
168 returnline = u""
169 self.inecho = False
170 else:
171 if unit.isfuzzy() and not self.includefuzzy or len(unit.target) == 0:
172 value = unit.source
173 else:
174 value = self._handle_accesskeys(unit, key)
175 self.inecho = False
176 assert isinstance(value, str)
177 returnline = "%(key)s%(del)s%(value)s%(term)s%(eol)s" % {
178 "key": "%s%s%s" % (self.personality.key_wrap_char,
179 key,
180 self.personality.key_wrap_char),
181 "del": delimiter,
182 "value": "%s%s%s" % (self.personality.value_wrap_char,
183 self.personality.encode(value),
184 self.personality.value_wrap_char),
185 "term": self.personality.pair_terminator,
186 "eol": eol,
187 }
188 else:
189 self.inecho = True
190 returnline = line + eol
191 assert isinstance(returnline, str)
192 return returnline
193
194
195 def convertstrings(inputfile, outputfile, templatefile, personality="strings",
196 includefuzzy=False, encoding=None, outputthreshold=None,
197 remove_untranslated=False):
198 """.strings specific convertor function"""
199 return convertprop(inputfile, outputfile, templatefile,
200 personality="strings", includefuzzy=includefuzzy,
201 encoding=encoding, outputthreshold=outputthreshold,
202 remove_untranslated=remove_untranslated)
203
204
205 def convertmozillaprop(inputfile, outputfile, templatefile,
206 includefuzzy=False, remove_untranslated=False,
207 outputthreshold=None):
208 """Mozilla specific convertor function"""
209 return convertprop(inputfile, outputfile, templatefile,
210 personality="mozilla", includefuzzy=includefuzzy,
211 remove_untranslated=remove_untranslated,
212 outputthreshold=outputthreshold)
213
214
215 def convertprop(inputfile, outputfile, templatefile, personality="java",
216 includefuzzy=False, encoding=None, remove_untranslated=False,
217 outputthreshold=None):
218 inputstore = po.pofile(inputfile)
219
220 if not convert.should_output_store(inputstore, outputthreshold):
221 return False
222
223 if templatefile is None:
224 raise ValueError("must have template file for properties files")
225 # convertor = po2prop()
226 else:
227 convertor = reprop(templatefile, inputstore, personality, encoding,
228 remove_untranslated)
229 outputprop = convertor.convertstore(includefuzzy)
230 outputfile.write(outputprop)
231 return True
232
233
234 formats = {
235 ("po", "properties"): ("properties", convertprop),
236 ("po", "lang"): ("lang", convertprop),
237 ("po", "strings"): ("strings", convertstrings),
238 }
239
240
241 def main(argv=None):
242 # handle command line options
243 parser = convert.ConvertOptionParser(formats, usetemplates=True,
244 description=__doc__)
245 parser.add_option(
246 "", "--personality", dest="personality",
247 default=properties.default_dialect, type="choice",
248 choices=list(properties.dialects),
249 help="override the input file format: %s (for .properties files, default: %s)" % (
250 ", ".join(properties.dialects), properties.default_dialect),
251 metavar="TYPE")
252 parser.add_option(
253 "", "--encoding", dest="encoding", default=None,
254 help="override the encoding set by the personality",
255 metavar="ENCODING")
256 parser.add_threshold_option()
257 parser.add_fuzzy_option()
258 parser.add_remove_untranslated_option()
259 parser.passthrough.append("personality")
260 parser.passthrough.append("encoding")
261 parser.run(argv)
262
263
264 if __name__ == '__main__':
265 main()
266
[end of translate/convert/po2prop.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/translate/convert/po2prop.py b/translate/convert/po2prop.py
--- a/translate/convert/po2prop.py
+++ b/translate/convert/po2prop.py
@@ -178,7 +178,7 @@
"key": "%s%s%s" % (self.personality.key_wrap_char,
key,
self.personality.key_wrap_char),
- "del": delimiter,
+ "del": delimiter if delimiter_pos != -1 or value else "",
"value": "%s%s%s" % (self.personality.value_wrap_char,
self.personality.encode(value),
self.personality.value_wrap_char),
| {"golden_diff": "diff --git a/translate/convert/po2prop.py b/translate/convert/po2prop.py\n--- a/translate/convert/po2prop.py\n+++ b/translate/convert/po2prop.py\n@@ -178,7 +178,7 @@\n \"key\": \"%s%s%s\" % (self.personality.key_wrap_char,\n key,\n self.personality.key_wrap_char),\n- \"del\": delimiter,\n+ \"del\": delimiter if delimiter_pos != -1 or value else \"\",\n \"value\": \"%s%s%s\" % (self.personality.value_wrap_char,\n self.personality.encode(value),\n self.personality.value_wrap_char),\n", "issue": "Fix properties roundtrip\nAfter #3607 some changes need to be introduced in order to fix properties roundtrip. More details in https://github.com/translate/translate/pull/3607#issuecomment-291440437 \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2002-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Java/Mozilla .properties files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html\nfor examples and usage instructions.\n\"\"\"\n\nimport warnings\n\nfrom translate.convert import accesskey, convert\nfrom translate.misc import quote\nfrom translate.storage import po, properties\n\n\neol = u\"\\n\"\n\n\ndef applytranslation(key, propunit, inunit, mixedkeys):\n \"\"\"applies the translation for key in the po unit to the prop unit\"\"\"\n # this converts the po-style string to a prop-style string\n value = inunit.target\n # handle mixed keys\n for labelsuffix in properties.labelsuffixes:\n if key.endswith(labelsuffix):\n if key in mixedkeys:\n value, akey = accesskey.extract(value)\n break\n else:\n for akeysuffix in properties.accesskeysuffixes:\n if key.endswith(akeysuffix):\n if key in mixedkeys:\n label, value = accesskey.extract(value)\n if not value:\n warnings.warn(\"Could not find accesskey for %s\" % key)\n # Use the source language accesskey\n label, value = accesskey.extract(inunit.source)\n else:\n original = propunit.source\n # For the sake of diffs we keep the case of the\n # accesskey the same if we know the translation didn't\n # change. 
Casing matters in XUL.\n if value == propunit.source and original.lower() == value.lower():\n if original.isupper():\n value = value.upper()\n elif original.islower():\n value = value.lower()\n return value\n\n\nclass reprop:\n\n def __init__(self, templatefile, inputstore, personality, encoding=None,\n remove_untranslated=False):\n self.templatefile = templatefile\n self.inputstore = inputstore\n self.personality = properties.get_dialect(personality)\n self.encoding = encoding\n if self.encoding is None:\n self.encoding = self.personality.default_encoding\n self.remove_untranslated = remove_untranslated\n self.mixer = accesskey.UnitMixer(properties.labelsuffixes,\n properties.accesskeysuffixes)\n\n def convertstore(self, includefuzzy=False):\n self.includefuzzy = includefuzzy\n self.inmultilinemsgid = False\n self.inecho = False\n self.inputstore.makeindex()\n if self.personality.name == \"gaia\":\n self._explode_gaia_plurals()\n outputlines = []\n # Readlines doesn't work for UTF-16, we read() and splitlines(keepends) instead\n content = self.templatefile.read().decode(self.encoding)\n for line in content.splitlines(True):\n outputstr = self.convertline(line)\n outputlines.append(outputstr)\n return u\"\".join(outputlines).encode(self.encoding)\n\n def _handle_accesskeys(self, inunit, currkey):\n value = inunit.target\n if self.personality.name == \"mozilla\":\n keys = inunit.getlocations()\n mixedkeys = self.mixer.match_entities(keys)\n for key in keys:\n if key == currkey and key in self.inputstore.locationindex:\n propunit = self.inputstore.locationindex[key] # find the prop\n value = applytranslation(key, propunit, inunit, mixedkeys)\n break\n\n return value\n\n def _explode_gaia_plurals(self):\n \"\"\"Explode the gaia plurals.\"\"\"\n from translate.lang import data\n for unit in self.inputstore.units:\n if not unit.hasplural():\n continue\n if unit.isfuzzy() and not self.includefuzzy or not unit.istranslated():\n continue\n\n names = data.cldr_plural_categories\n location = unit.getlocations()[0]\n for category, text in zip(names, unit.target.strings):\n # TODO: for now we assume all forms are present. 
We need to\n # fill in the rest after mapping things to the proper CLDR names.\n if category == 'zero':\n # [zero] cases are translated as separate units\n continue\n new_unit = self.inputstore.addsourceunit(u\"fish\") # not used\n new_location = '%s[%s]' % (location, category)\n new_unit.addlocation(new_location)\n new_unit.target = text\n self.inputstore.locationindex[new_location] = new_unit\n\n # We don't want the plural marker to be translated:\n del self.inputstore.locationindex[location]\n\n def convertline(self, line):\n returnline = u\"\"\n # handle multiline msgid if we're in one\n if self.inmultilinemsgid:\n msgid = quote.rstripeol(line).strip()\n # see if there's more\n self.inmultilinemsgid = (msgid[-1:] == '\\\\')\n # if we're echoing...\n if self.inecho:\n returnline = line\n # otherwise, this could be a comment\n elif line.strip()[:1] == '#':\n returnline = quote.rstripeol(line) + eol\n else:\n line = quote.rstripeol(line)\n delimiter_char, delimiter_pos = self.personality.find_delimiter(line)\n if quote.rstripeol(line)[-1:] == '\\\\':\n self.inmultilinemsgid = True\n if delimiter_pos == -1:\n key = self.personality.key_strip(line)\n delimiter = \" %s \" % self.personality.delimiters[0]\n else:\n key = self.personality.key_strip(line[:delimiter_pos])\n # Calculate space around the equal sign\n prespace = line[line.find(' ', len(key)):delimiter_pos]\n postspacestart = len(line[delimiter_pos+1:])\n postspaceend = len(line[delimiter_pos+1:].lstrip())\n postspace = line[delimiter_pos+1:delimiter_pos+(postspacestart-postspaceend)+1]\n delimiter = prespace + delimiter_char + postspace\n if key in self.inputstore.locationindex:\n unit = self.inputstore.locationindex[key]\n if unit is None or not unit.istranslated() and bool(unit.source) and self.remove_untranslated:\n returnline = u\"\"\n self.inecho = False\n else:\n if unit.isfuzzy() and not self.includefuzzy or len(unit.target) == 0:\n value = unit.source\n else:\n value = self._handle_accesskeys(unit, key)\n self.inecho = False\n assert isinstance(value, str)\n returnline = \"%(key)s%(del)s%(value)s%(term)s%(eol)s\" % {\n \"key\": \"%s%s%s\" % (self.personality.key_wrap_char,\n key,\n self.personality.key_wrap_char),\n \"del\": delimiter,\n \"value\": \"%s%s%s\" % (self.personality.value_wrap_char,\n self.personality.encode(value),\n self.personality.value_wrap_char),\n \"term\": self.personality.pair_terminator,\n \"eol\": eol,\n }\n else:\n self.inecho = True\n returnline = line + eol\n assert isinstance(returnline, str)\n return returnline\n\n\ndef convertstrings(inputfile, outputfile, templatefile, personality=\"strings\",\n includefuzzy=False, encoding=None, outputthreshold=None,\n remove_untranslated=False):\n \"\"\".strings specific convertor function\"\"\"\n return convertprop(inputfile, outputfile, templatefile,\n personality=\"strings\", includefuzzy=includefuzzy,\n encoding=encoding, outputthreshold=outputthreshold,\n remove_untranslated=remove_untranslated)\n\n\ndef convertmozillaprop(inputfile, outputfile, templatefile,\n includefuzzy=False, remove_untranslated=False,\n outputthreshold=None):\n \"\"\"Mozilla specific convertor function\"\"\"\n return convertprop(inputfile, outputfile, templatefile,\n personality=\"mozilla\", includefuzzy=includefuzzy,\n remove_untranslated=remove_untranslated,\n outputthreshold=outputthreshold)\n\n\ndef convertprop(inputfile, outputfile, templatefile, personality=\"java\",\n includefuzzy=False, encoding=None, remove_untranslated=False,\n outputthreshold=None):\n inputstore = 
po.pofile(inputfile)\n\n if not convert.should_output_store(inputstore, outputthreshold):\n return False\n\n if templatefile is None:\n raise ValueError(\"must have template file for properties files\")\n # convertor = po2prop()\n else:\n convertor = reprop(templatefile, inputstore, personality, encoding,\n remove_untranslated)\n outputprop = convertor.convertstore(includefuzzy)\n outputfile.write(outputprop)\n return True\n\n\nformats = {\n (\"po\", \"properties\"): (\"properties\", convertprop),\n (\"po\", \"lang\"): (\"lang\", convertprop),\n (\"po\", \"strings\"): (\"strings\", convertstrings),\n}\n\n\ndef main(argv=None):\n # handle command line options\n parser = convert.ConvertOptionParser(formats, usetemplates=True,\n description=__doc__)\n parser.add_option(\n \"\", \"--personality\", dest=\"personality\",\n default=properties.default_dialect, type=\"choice\",\n choices=list(properties.dialects),\n help=\"override the input file format: %s (for .properties files, default: %s)\" % (\n \", \".join(properties.dialects), properties.default_dialect),\n metavar=\"TYPE\")\n parser.add_option(\n \"\", \"--encoding\", dest=\"encoding\", default=None,\n help=\"override the encoding set by the personality\",\n metavar=\"ENCODING\")\n parser.add_threshold_option()\n parser.add_fuzzy_option()\n parser.add_remove_untranslated_option()\n parser.passthrough.append(\"personality\")\n parser.passthrough.append(\"encoding\")\n parser.run(argv)\n\n\nif __name__ == '__main__':\n main()\n", "path": "translate/convert/po2prop.py"}]} | 3,603 | 147 |
gh_patches_debug_39694 | rasdani/github-patches | git_diff | NVIDIA__TransformerEngine-313 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Export transformer_engine.pytorch.LayerNorm to ONNX gives all zero output
After exporting `transformer_engine.pytorch.LayerNorm` to ONNX, it always give tensors with correct shape but filled with 0.
Example code:
```
import torch
import onnxruntime
from transformer_engine import pytorch as te
model = te.LayerNorm(1000).cuda().eval()
x_sample = torch.randn(3000, 1000)
with torch.inference_mode():
torch_out = model(x_sample.cuda()) # the result is correct
with torch.inference_mode():
with te.onnx_export(True):
torch.onnx.export(model, x_sample.cuda(), "layer_norm.onnx", dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}}, opset_version=17, input_names=["input"], output_names=["output"])
ort_session = onnxruntime.InferenceSession("layer_norm.onnx", providers=["CPUExecutionProvider"])
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x_sample)}
ort_output = ort_session.run(None, ort_inputs)[0]
# ort_output is all zero
```
Other pytorch modules like `transformer_engine.pytorch.LayerNormLinear` gives correct results.
OS: RHEL 7
Python: 3.10.11
TransformerEngine: 0.9
Pytorch: 2.0.1+cu118
GPU: 4090
</issue>
<code>
[start of transformer_engine/pytorch/module/layernorm.py]
1 # Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 #
3 # See LICENSE for license information.
4
5 """LayerNorm API"""
6 import os
7 from typing import Union, Tuple, Any, Mapping, Optional
8
9 import torch
10 from torch.nn.parameter import Parameter
11 from torch.nn import init
12
13 import transformer_engine_extensions as tex
14
15
16 __all__ = ["LayerNorm"]
17
18
19 class _LayerNorm(torch.autograd.Function):
20 """functional LayerNorm"""
21
22 @staticmethod
23 def forward(
24 ctx,
25 inp: torch.Tensor,
26 ln_weight: torch.Tensor,
27 ln_bias: torch.Tensor,
28 eps: float,
29 fwd_ln_sm_margin: int,
30 bwd_ln_sm_margin: int,
31 zero_centered_gamma: bool,
32 ) -> torch.Tensor:
33 # Make sure input dimensions are compatible
34 in_features = ln_weight.numel()
35 assert inp.is_cuda, "TransformerEngine needs CUDA."
36 assert inp.shape[-1] == in_features, "LayerNorm not possible"
37 inputmat = inp.view((-1, in_features))
38
39 ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight,
40 ln_bias, eps, fwd_ln_sm_margin,
41 zero_centered_gamma)
42 ctx.save_for_backward(inputmat, ln_weight, mu, rsigma)
43 ctx.inp_shape = inp.shape
44 ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
45 ctx.zero_centered_gamma = zero_centered_gamma
46 return ln_out.view_as(inp)
47
48 @staticmethod
49 def backward(
50 ctx, grad_output: torch.Tensor
51 ) -> Tuple[Union[torch.Tensor, None], ...]:
52 inputmat, ln_weight, mu, rsigma = ctx.saved_tensors
53 grad_output = grad_output.contiguous()
54 d_ln_out = grad_output.view(inputmat.shape)
55 dxmat, dgamma, dbeta = tex.layernorm_bwd(
56 d_ln_out, inputmat, mu, rsigma, ln_weight,
57 ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma
58 )
59 return dxmat.view(ctx.inp_shape), dgamma, dbeta, None, None, None, None
60
61
62 class LayerNorm(torch.nn.Module):
63 r"""
64 Applies Layer Normalization over a mini-batch of inputs as described in
65 the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__
66
67 .. math::
68 y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} * \gamma + \beta
69
70 :math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
71 size :attr:`hidden_size`
72
73 Parameters
74 ----------
75 hidden_size : int
76 size of each input sample.
77 eps : float, default = 1e-5
78 a value added to the denominator of layer normalization for numerical stability.
79 sequence_parallel : bool, default = `False`
80 if set to `True`, uses sequence parallelism.
81 params_dtype : torch.dtype, default = `torch.get_default_dtype()`
82 it controls the type used to allocate the initial parameters. Useful when
83 the model is trained with lower precision and the original FP32 parameters
84 would not fit in GPU memory.
85 zero_centered_gamma : bool, default = 'False'
86 if set to 'True', gamma parameter in LayerNorm is initialized to 0 and
87 the LayerNorm formula changes to
88
89 .. math::
90 y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} *
91 (1 + \gamma) + \beta
92 """
93
94 def __init__(
95 self,
96 hidden_size: int,
97 eps: float = 1e-5,
98 sequence_parallel: bool = False,
99 params_dtype: Optional[torch.dtype] = None,
100 zero_centered_gamma: bool = False,
101 ) -> None:
102 super().__init__()
103 params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
104 self.eps = eps
105 self.zero_centered_gamma = zero_centered_gamma
106 self.weight = Parameter(
107 torch.empty(
108 hidden_size,
109 device=torch.cuda.current_device(),
110 dtype=params_dtype,
111 )
112 )
113 self.bias = Parameter(
114 torch.empty(
115 hidden_size,
116 device=torch.cuda.current_device(),
117 dtype=params_dtype,
118 )
119 )
120 setattr(self.weight, "sequence_parallel", sequence_parallel)
121 setattr(self.bias, "sequence_parallel", sequence_parallel)
122 self.reset_layer_norm_parameters()
123
124 # These many SMs are subtracted from the total SM count when calling forward
125 # and backward LayerNorm C APIs. These envvars can be used to prevent the LN
126 # kernels from using all SMs in the device. This is useful for cases such as
127 # communication overlap with LN.
128 self.fwd_ln_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
129 self.bwd_ln_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
130
131 def load_state_dict(
132 self,
133 state_dict: Mapping[str, Any],
134 strict: bool = True,
135 ) -> None:
136 """Override PyTorch loader to maintain backward compatibility
137 with previous version of LayerNorm parameter names.
138 """
139 if "layer_norm_weight" in state_dict:
140 state_dict["weight"] = state_dict["layer_norm_weight"]
141 del state_dict["layer_norm_weight"]
142 if "layer_norm_bias" in state_dict:
143 state_dict["bias"] = state_dict["layer_norm_bias"]
144 del state_dict["layer_norm_bias"]
145
146 super().load_state_dict(state_dict, strict)
147
148 def reset_layer_norm_parameters(self) -> None:
149 """Init LN params"""
150 if not self.zero_centered_gamma:
151 init.ones_(self.weight)
152 else:
153 init.zeros_(self.weight)
154 init.zeros_(self.bias)
155
156
157 def forward(self, inp: torch.Tensor) -> torch.Tensor:
158 """LayerNorm FWD"""
159 # Maintain backward compatibility.
160 if hasattr(self, "layer_norm_weight"):
161 setattr(self, "weight", self.layer_norm_weight)
162 if hasattr(self, "layer_norm_bias"):
163 setattr(self, "bias", self.layer_norm_bias)
164
165 return _LayerNorm.apply(
166 inp,
167 self.weight,
168 self.bias,
169 self.eps,
170 self.fwd_ln_sm_margin,
171 self.bwd_ln_sm_margin,
172 self.zero_centered_gamma
173 )
174
[end of transformer_engine/pytorch/module/layernorm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/transformer_engine/pytorch/module/layernorm.py b/transformer_engine/pytorch/module/layernorm.py
--- a/transformer_engine/pytorch/module/layernorm.py
+++ b/transformer_engine/pytorch/module/layernorm.py
@@ -11,7 +11,9 @@
from torch.nn import init
import transformer_engine_extensions as tex
-
+from ..cpp_extensions import (
+ layernorm_fwd_inf,
+ )
__all__ = ["LayerNorm"]
@@ -29,6 +31,7 @@
fwd_ln_sm_margin: int,
bwd_ln_sm_margin: int,
zero_centered_gamma: bool,
+ is_grad_enabled: bool,
) -> torch.Tensor:
# Make sure input dimensions are compatible
in_features = ln_weight.numel()
@@ -36,13 +39,16 @@
assert inp.shape[-1] == in_features, "LayerNorm not possible"
inputmat = inp.view((-1, in_features))
- ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight,
- ln_bias, eps, fwd_ln_sm_margin,
- zero_centered_gamma)
- ctx.save_for_backward(inputmat, ln_weight, mu, rsigma)
- ctx.inp_shape = inp.shape
- ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
- ctx.zero_centered_gamma = zero_centered_gamma
+ if is_grad_enabled:
+ ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight,
+ ln_bias, eps, fwd_ln_sm_margin, zero_centered_gamma)
+ ctx.save_for_backward(inputmat, ln_weight, mu, rsigma)
+ ctx.inp_shape = inp.shape
+ ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
+ ctx.zero_centered_gamma = zero_centered_gamma
+ else:
+ ln_out, mu, rsigma = layernorm_fwd_inf(inputmat, ln_weight,
+ ln_bias, eps, zero_centered_gamma), None, None
return ln_out.view_as(inp)
@staticmethod
@@ -56,7 +62,7 @@
d_ln_out, inputmat, mu, rsigma, ln_weight,
ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma
)
- return dxmat.view(ctx.inp_shape), dgamma, dbeta, None, None, None, None
+ return dxmat.view(ctx.inp_shape), dgamma, dbeta, None, None, None, None, None
class LayerNorm(torch.nn.Module):
@@ -162,12 +168,22 @@
if hasattr(self, "layer_norm_bias"):
setattr(self, "bias", self.layer_norm_bias)
- return _LayerNorm.apply(
+ if torch.is_grad_enabled():
+ fwd_fn = _LayerNorm.apply
+ args = []
+ else:
+ fwd_fn = _LayerNorm.forward
+ args = [None]
+
+ args += (
inp,
self.weight,
self.bias,
self.eps,
self.fwd_ln_sm_margin,
self.bwd_ln_sm_margin,
- self.zero_centered_gamma
+ self.zero_centered_gamma,
+ torch.is_grad_enabled()
)
+
+ return fwd_fn(*args)
| {"golden_diff": "diff --git a/transformer_engine/pytorch/module/layernorm.py b/transformer_engine/pytorch/module/layernorm.py\n--- a/transformer_engine/pytorch/module/layernorm.py\n+++ b/transformer_engine/pytorch/module/layernorm.py\n@@ -11,7 +11,9 @@\n from torch.nn import init\n \n import transformer_engine_extensions as tex\n-\n+from ..cpp_extensions import (\n+ layernorm_fwd_inf,\n+ )\n \n __all__ = [\"LayerNorm\"]\n \n@@ -29,6 +31,7 @@\n fwd_ln_sm_margin: int,\n bwd_ln_sm_margin: int,\n zero_centered_gamma: bool,\n+ is_grad_enabled: bool,\n ) -> torch.Tensor:\n # Make sure input dimensions are compatible\n in_features = ln_weight.numel()\n@@ -36,13 +39,16 @@\n assert inp.shape[-1] == in_features, \"LayerNorm not possible\"\n inputmat = inp.view((-1, in_features))\n \n- ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight,\n- ln_bias, eps, fwd_ln_sm_margin,\n- zero_centered_gamma)\n- ctx.save_for_backward(inputmat, ln_weight, mu, rsigma)\n- ctx.inp_shape = inp.shape\n- ctx.bwd_ln_sm_margin = bwd_ln_sm_margin\n- ctx.zero_centered_gamma = zero_centered_gamma\n+ if is_grad_enabled:\n+ ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight,\n+ ln_bias, eps, fwd_ln_sm_margin, zero_centered_gamma)\n+ ctx.save_for_backward(inputmat, ln_weight, mu, rsigma)\n+ ctx.inp_shape = inp.shape\n+ ctx.bwd_ln_sm_margin = bwd_ln_sm_margin\n+ ctx.zero_centered_gamma = zero_centered_gamma\n+ else:\n+ ln_out, mu, rsigma = layernorm_fwd_inf(inputmat, ln_weight,\n+ ln_bias, eps, zero_centered_gamma), None, None\n return ln_out.view_as(inp)\n \n @staticmethod\n@@ -56,7 +62,7 @@\n d_ln_out, inputmat, mu, rsigma, ln_weight,\n ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma\n )\n- return dxmat.view(ctx.inp_shape), dgamma, dbeta, None, None, None, None\n+ return dxmat.view(ctx.inp_shape), dgamma, dbeta, None, None, None, None, None\n \n \n class LayerNorm(torch.nn.Module):\n@@ -162,12 +168,22 @@\n if hasattr(self, \"layer_norm_bias\"):\n setattr(self, \"bias\", self.layer_norm_bias)\n \n- return _LayerNorm.apply(\n+ if torch.is_grad_enabled():\n+ fwd_fn = _LayerNorm.apply\n+ args = []\n+ else:\n+ fwd_fn = _LayerNorm.forward\n+ args = [None]\n+\n+ args += (\n inp,\n self.weight,\n self.bias,\n self.eps,\n self.fwd_ln_sm_margin,\n self.bwd_ln_sm_margin,\n- self.zero_centered_gamma\n+ self.zero_centered_gamma,\n+ torch.is_grad_enabled()\n )\n+\n+ return fwd_fn(*args)\n", "issue": "Export transformer_engine.pytorch.LayerNorm to ONNX gives all zero output\nAfter exporting `transformer_engine.pytorch.LayerNorm` to ONNX, it always give tensors with correct shape but filled with 0.\r\n\r\nExample code:\r\n```\r\nimport torch\r\nimport onnxruntime\r\nfrom transformer_engine import pytorch as te\r\n\r\nmodel = te.LayerNorm(1000).cuda().eval()\r\nx_sample = torch.randn(3000, 1000)\r\nwith torch.inference_mode():\r\n torch_out = model(x_sample.cuda()) # the result is correct\r\n\r\nwith torch.inference_mode():\r\n with te.onnx_export(True):\r\n torch.onnx.export(model, x_sample.cuda(), \"layer_norm.onnx\", dynamic_axes={\"input\": {0: \"batch_size\"}, \"output\": {0: \"batch_size\"}}, opset_version=17, input_names=[\"input\"], output_names=[\"output\"])\r\n\r\nort_session = onnxruntime.InferenceSession(\"layer_norm.onnx\", providers=[\"CPUExecutionProvider\"])\r\nort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x_sample)}\r\nort_output = ort_session.run(None, ort_inputs)[0]\r\n# ort_output is all zero\r\n```\r\n\r\nOther pytorch modules like `transformer_engine.pytorch.LayerNormLinear` gives correct 
results.\r\n\r\n\r\nOS: RHEL 7\r\nPython: 3.10.11\r\nTransformerEngine: 0.9\r\nPytorch: 2.0.1+cu118\r\nGPU: 4090\n", "before_files": [{"content": "# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# See LICENSE for license information.\n\n\"\"\"LayerNorm API\"\"\"\nimport os\nfrom typing import Union, Tuple, Any, Mapping, Optional\n\nimport torch\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\n\nimport transformer_engine_extensions as tex\n\n\n__all__ = [\"LayerNorm\"]\n\n\nclass _LayerNorm(torch.autograd.Function):\n \"\"\"functional LayerNorm\"\"\"\n\n @staticmethod\n def forward(\n ctx,\n inp: torch.Tensor,\n ln_weight: torch.Tensor,\n ln_bias: torch.Tensor,\n eps: float,\n fwd_ln_sm_margin: int,\n bwd_ln_sm_margin: int,\n zero_centered_gamma: bool,\n ) -> torch.Tensor:\n # Make sure input dimensions are compatible\n in_features = ln_weight.numel()\n assert inp.is_cuda, \"TransformerEngine needs CUDA.\"\n assert inp.shape[-1] == in_features, \"LayerNorm not possible\"\n inputmat = inp.view((-1, in_features))\n\n ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight,\n ln_bias, eps, fwd_ln_sm_margin,\n zero_centered_gamma)\n ctx.save_for_backward(inputmat, ln_weight, mu, rsigma)\n ctx.inp_shape = inp.shape\n ctx.bwd_ln_sm_margin = bwd_ln_sm_margin\n ctx.zero_centered_gamma = zero_centered_gamma\n return ln_out.view_as(inp)\n\n @staticmethod\n def backward(\n ctx, grad_output: torch.Tensor\n ) -> Tuple[Union[torch.Tensor, None], ...]:\n inputmat, ln_weight, mu, rsigma = ctx.saved_tensors\n grad_output = grad_output.contiguous()\n d_ln_out = grad_output.view(inputmat.shape)\n dxmat, dgamma, dbeta = tex.layernorm_bwd(\n d_ln_out, inputmat, mu, rsigma, ln_weight,\n ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma\n )\n return dxmat.view(ctx.inp_shape), dgamma, dbeta, None, None, None, None\n\n\nclass LayerNorm(torch.nn.Module):\n r\"\"\"\n Applies Layer Normalization over a mini-batch of inputs as described in\n the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__\n\n .. math::\n y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\varepsilon}} * \\gamma + \\beta\n\n :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of\n size :attr:`hidden_size`\n\n Parameters\n ----------\n hidden_size : int\n size of each input sample.\n eps : float, default = 1e-5\n a value added to the denominator of layer normalization for numerical stability.\n sequence_parallel : bool, default = `False`\n if set to `True`, uses sequence parallelism.\n params_dtype : torch.dtype, default = `torch.get_default_dtype()`\n it controls the type used to allocate the initial parameters. Useful when\n the model is trained with lower precision and the original FP32 parameters\n would not fit in GPU memory.\n zero_centered_gamma : bool, default = 'False'\n if set to 'True', gamma parameter in LayerNorm is initialized to 0 and\n the LayerNorm formula changes to\n\n .. 
math::\n y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\varepsilon}} *\n (1 + \\gamma) + \\beta\n \"\"\"\n\n def __init__(\n self,\n hidden_size: int,\n eps: float = 1e-5,\n sequence_parallel: bool = False,\n params_dtype: Optional[torch.dtype] = None,\n zero_centered_gamma: bool = False,\n ) -> None:\n super().__init__()\n params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype\n self.eps = eps\n self.zero_centered_gamma = zero_centered_gamma\n self.weight = Parameter(\n torch.empty(\n hidden_size,\n device=torch.cuda.current_device(),\n dtype=params_dtype,\n )\n )\n self.bias = Parameter(\n torch.empty(\n hidden_size,\n device=torch.cuda.current_device(),\n dtype=params_dtype,\n )\n )\n setattr(self.weight, \"sequence_parallel\", sequence_parallel)\n setattr(self.bias, \"sequence_parallel\", sequence_parallel)\n self.reset_layer_norm_parameters()\n\n # These many SMs are subtracted from the total SM count when calling forward\n # and backward LayerNorm C APIs. These envvars can be used to prevent the LN\n # kernels from using all SMs in the device. This is useful for cases such as\n # communication overlap with LN.\n self.fwd_ln_sm_margin = int(os.getenv(\"NVTE_FWD_LAYERNORM_SM_MARGIN\", \"0\"))\n self.bwd_ln_sm_margin = int(os.getenv(\"NVTE_BWD_LAYERNORM_SM_MARGIN\", \"0\"))\n\n def load_state_dict(\n self,\n state_dict: Mapping[str, Any],\n strict: bool = True,\n ) -> None:\n \"\"\"Override PyTorch loader to maintain backward compatibility\n with previous version of LayerNorm parameter names.\n \"\"\"\n if \"layer_norm_weight\" in state_dict:\n state_dict[\"weight\"] = state_dict[\"layer_norm_weight\"]\n del state_dict[\"layer_norm_weight\"]\n if \"layer_norm_bias\" in state_dict:\n state_dict[\"bias\"] = state_dict[\"layer_norm_bias\"]\n del state_dict[\"layer_norm_bias\"]\n\n super().load_state_dict(state_dict, strict)\n\n def reset_layer_norm_parameters(self) -> None:\n \"\"\"Init LN params\"\"\"\n if not self.zero_centered_gamma:\n init.ones_(self.weight)\n else:\n init.zeros_(self.weight)\n init.zeros_(self.bias)\n\n\n def forward(self, inp: torch.Tensor) -> torch.Tensor:\n \"\"\"LayerNorm FWD\"\"\"\n # Maintain backward compatibility.\n if hasattr(self, \"layer_norm_weight\"):\n setattr(self, \"weight\", self.layer_norm_weight)\n if hasattr(self, \"layer_norm_bias\"):\n setattr(self, \"bias\", self.layer_norm_bias)\n\n return _LayerNorm.apply(\n inp,\n self.weight,\n self.bias,\n self.eps,\n self.fwd_ln_sm_margin,\n self.bwd_ln_sm_margin,\n self.zero_centered_gamma\n )\n", "path": "transformer_engine/pytorch/module/layernorm.py"}]} | 2,716 | 743 |
gh_patches_debug_20456 | rasdani/github-patches | git_diff | translate__pootle-5699 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lost timeSince data for suggestions
</issue>
<code>
[start of pootle/apps/pootle_misc/templatetags/locale.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django import template
10 from django.utils.formats import get_format
11 from django.utils.translation import trans_real
12
13 from pootle.core.utils import dateformat
14
15
16 register = template.Library()
17
18
19 @register.simple_tag
20 def locale_dir():
21 """Returns current locale's direction."""
22 return trans_real.get_language_bidi() and "rtl" or "ltr"
23
24
25 @register.filter(name='dateformat')
26 def do_dateformat(value, use_format='c'):
27 """Formats a `value` date using `format`.
28
29 :param value: a datetime object.
30 :param use_format: a format string accepted by
31 :func:`django.utils.formats.get_format` or
32 :func:`django.utils.dateformat.format`. If none is set, the current
33 locale's default format will be used.
34 """
35 try:
36 use_format = get_format(use_format)
37 except AttributeError:
38 pass
39
40 return dateformat.format(value, use_format)
41
42
43 @register.simple_tag
44 def locale_align():
45 """Returns current locale's default alignment."""
46 return trans_real.get_language_bidi() and "right" or "left"
47
[end of pootle/apps/pootle_misc/templatetags/locale.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_misc/templatetags/locale.py b/pootle/apps/pootle_misc/templatetags/locale.py
--- a/pootle/apps/pootle_misc/templatetags/locale.py
+++ b/pootle/apps/pootle_misc/templatetags/locale.py
@@ -6,11 +6,14 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+import calendar
+
from django import template
from django.utils.formats import get_format
from django.utils.translation import trans_real
from pootle.core.utils import dateformat
+from pootle.local.dates import timesince
register = template.Library()
@@ -40,6 +43,11 @@
return dateformat.format(value, use_format)
[email protected](name='relative_datetime_format')
+def do_relative_datetime_format(value):
+ return timesince(calendar.timegm(value.timetuple()))
+
+
@register.simple_tag
def locale_align():
"""Returns current locale's default alignment."""
| {"golden_diff": "diff --git a/pootle/apps/pootle_misc/templatetags/locale.py b/pootle/apps/pootle_misc/templatetags/locale.py\n--- a/pootle/apps/pootle_misc/templatetags/locale.py\n+++ b/pootle/apps/pootle_misc/templatetags/locale.py\n@@ -6,11 +6,14 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+import calendar\n+\n from django import template\n from django.utils.formats import get_format\n from django.utils.translation import trans_real\n \n from pootle.core.utils import dateformat\n+from pootle.local.dates import timesince\n \n \n register = template.Library()\n@@ -40,6 +43,11 @@\n return dateformat.format(value, use_format)\n \n \[email protected](name='relative_datetime_format')\n+def do_relative_datetime_format(value):\n+ return timesince(calendar.timegm(value.timetuple()))\n+\n+\n @register.simple_tag\n def locale_align():\n \"\"\"Returns current locale's default alignment.\"\"\"\n", "issue": "Lost timeSince data for suggestions\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import template\nfrom django.utils.formats import get_format\nfrom django.utils.translation import trans_real\n\nfrom pootle.core.utils import dateformat\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef locale_dir():\n \"\"\"Returns current locale's direction.\"\"\"\n return trans_real.get_language_bidi() and \"rtl\" or \"ltr\"\n\n\[email protected](name='dateformat')\ndef do_dateformat(value, use_format='c'):\n \"\"\"Formats a `value` date using `format`.\n\n :param value: a datetime object.\n :param use_format: a format string accepted by\n :func:`django.utils.formats.get_format` or\n :func:`django.utils.dateformat.format`. If none is set, the current\n locale's default format will be used.\n \"\"\"\n try:\n use_format = get_format(use_format)\n except AttributeError:\n pass\n\n return dateformat.format(value, use_format)\n\n\[email protected]_tag\ndef locale_align():\n \"\"\"Returns current locale's default alignment.\"\"\"\n return trans_real.get_language_bidi() and \"right\" or \"left\"\n", "path": "pootle/apps/pootle_misc/templatetags/locale.py"}]} | 957 | 243 |
gh_patches_debug_1597 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-915 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix a few issues in Django example
The Django example has a few issues, fix them.
</issue>
<code>
[start of docs/examples/django/pages/views.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from django.http import HttpResponse
15
16 from opentelemetry import trace
17 from opentelemetry.sdk.trace import TracerProvider
18 from opentelemetry.sdk.trace.export import (
19 ConsoleSpanExporter,
20 SimpleExportSpanProcessor,
21 )
22
23 trace.set_tracer_provider(TracerProvider())
24 tracer = trace.get_tracer_provider().get_tracer(__name__)
25
26 trace.get_tracer_provider().add_span_processor(
27 SimpleExportSpanProcessor(ConsoleSpanExporter())
28 )
29
30
31 def home_page_view(request):
32 return HttpResponse("Hello, world")
33
[end of docs/examples/django/pages/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/django/pages/views.py b/docs/examples/django/pages/views.py
--- a/docs/examples/django/pages/views.py
+++ b/docs/examples/django/pages/views.py
@@ -21,7 +21,6 @@
)
trace.set_tracer_provider(TracerProvider())
-tracer = trace.get_tracer_provider().get_tracer(__name__)
trace.get_tracer_provider().add_span_processor(
SimpleExportSpanProcessor(ConsoleSpanExporter())
| {"golden_diff": "diff --git a/docs/examples/django/pages/views.py b/docs/examples/django/pages/views.py\n--- a/docs/examples/django/pages/views.py\n+++ b/docs/examples/django/pages/views.py\n@@ -21,7 +21,6 @@\n )\n \n trace.set_tracer_provider(TracerProvider())\n-tracer = trace.get_tracer_provider().get_tracer(__name__)\n \n trace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n", "issue": "Fix a few issues in Django example\nThe Django example has a few issues, fix them.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom django.http import HttpResponse\n\nfrom opentelemetry import trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleExportSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\ntrace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n)\n\n\ndef home_page_view(request):\n return HttpResponse(\"Hello, world\")\n", "path": "docs/examples/django/pages/views.py"}]} | 851 | 99 |
gh_patches_debug_21112 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5171 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checkov v2.3.261 fails with CKV_AWS_356 for KMS actions which must specify 'all resources'
**Describe the issue**
Checkov v2.3.261 fails with CKV_AWS_356 highlights IAM policies which are overly permissive but is incorrectly identifying actions for KMS policies which need to be for all resources potentially scoped with conditional access per https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-services.html
Similar issue for https://github.com/bridgecrewio/checkov/issues/5134 where certain actions like 'list' require all resources.
**Examples**
```
data "aws_iam_policy_document" "myKmsKey" {
actions = [
"kms:GenerateDataKey",
"kms:Decrypt"
]
resources = [
"*"
]
condition {
test = "ArnEquals"
variable = "aws:SourceArn"
values = [
<SOME OTHER RESOURCE>.arn
]
}
}
}
```
**Version (please complete the following information):**
- Checkov Version 2.3.261
</issue>
<code>
[start of checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py]
1 from typing import Dict, List, Any
2
3 from checkov.common.util.data_structures_utils import pickle_deepcopy
4
5
6 def convert_terraform_conf_to_iam_policy(conf: Dict[str, List[Dict[str, Any]]]) -> Dict[str, List[Dict[str, Any]]]:
7 """
8 converts terraform parsed configuration to iam policy document
9 """
10 result = pickle_deepcopy(conf)
11 if "statement" in result.keys():
12 result["Statement"] = result.pop("statement")
13 for statement in result["Statement"]:
14 if "actions" in statement:
15 statement["Action"] = statement.pop("actions")[0]
16 if "resources" in statement:
17 statement["Resource"] = statement.pop("resources")[0]
18 if "not_actions" in statement:
19 statement["NotAction"] = statement.pop("not_actions")[0]
20 if "not_resources" in statement:
21 statement["NotResource"] = statement.pop("not_resources")[0]
22 if "effect" in statement:
23 statement["Effect"] = statement.pop("effect")[0]
24 if "effect" not in statement and "Effect" not in statement:
25 statement["Effect"] = "Allow"
26 return result
27
[end of checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
--- a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
+++ b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from typing import Dict, List, Any
from checkov.common.util.data_structures_utils import pickle_deepcopy
@@ -23,4 +25,13 @@
statement["Effect"] = statement.pop("effect")[0]
if "effect" not in statement and "Effect" not in statement:
statement["Effect"] = "Allow"
+ if "condition" in statement:
+ conditions = statement.pop("condition")
+ if conditions and isinstance(conditions, list):
+ statement["Condition"] = {}
+ for condition in conditions:
+ cond_operator = condition["test"][0]
+ cond_key = condition["variable"][0]
+ cond_value = condition["values"][0]
+ statement["Condition"].setdefault(cond_operator, {})[cond_key] = cond_value
return result
| {"golden_diff": "diff --git a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n--- a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n+++ b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n@@ -1,3 +1,5 @@\n+from __future__ import annotations\n+\n from typing import Dict, List, Any\n \n from checkov.common.util.data_structures_utils import pickle_deepcopy\n@@ -23,4 +25,13 @@\n statement[\"Effect\"] = statement.pop(\"effect\")[0]\n if \"effect\" not in statement and \"Effect\" not in statement:\n statement[\"Effect\"] = \"Allow\"\n+ if \"condition\" in statement:\n+ conditions = statement.pop(\"condition\")\n+ if conditions and isinstance(conditions, list):\n+ statement[\"Condition\"] = {}\n+ for condition in conditions:\n+ cond_operator = condition[\"test\"][0]\n+ cond_key = condition[\"variable\"][0]\n+ cond_value = condition[\"values\"][0]\n+ statement[\"Condition\"].setdefault(cond_operator, {})[cond_key] = cond_value\n return result\n", "issue": "Checkov v2.3.261 fails with CKV_AWS_356 for KMS actions which must specify 'all resources'\n**Describe the issue**\r\nCheckov v2.3.261 fails with CKV_AWS_356 highlights IAM policies which are overly permissive but is incorrectly identifying actions for KMS policies which need to be for all resources potentially scoped with conditional access per https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-services.html\r\n\r\nSimilar issue for https://github.com/bridgecrewio/checkov/issues/5134 where certain actions like 'list' require all resources.\r\n\r\n**Examples**\r\n```\r\ndata \"aws_iam_policy_document\" \"myKmsKey\" {\r\n actions = [\r\n \"kms:GenerateDataKey\",\r\n \"kms:Decrypt\"\r\n ]\r\n resources = [\r\n \"*\"\r\n ]\r\n\r\n condition {\r\n test = \"ArnEquals\"\r\n variable = \"aws:SourceArn\"\r\n values = [\r\n <SOME OTHER RESOURCE>.arn\r\n ]\r\n }\r\n }\r\n}\r\n```\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.261\r\n\n", "before_files": [{"content": "from typing import Dict, List, Any\n\nfrom checkov.common.util.data_structures_utils import pickle_deepcopy\n\n\ndef convert_terraform_conf_to_iam_policy(conf: Dict[str, List[Dict[str, Any]]]) -> Dict[str, List[Dict[str, Any]]]:\n \"\"\"\n converts terraform parsed configuration to iam policy document\n \"\"\"\n result = pickle_deepcopy(conf)\n if \"statement\" in result.keys():\n result[\"Statement\"] = result.pop(\"statement\")\n for statement in result[\"Statement\"]:\n if \"actions\" in statement:\n statement[\"Action\"] = statement.pop(\"actions\")[0]\n if \"resources\" in statement:\n statement[\"Resource\"] = statement.pop(\"resources\")[0]\n if \"not_actions\" in statement:\n statement[\"NotAction\"] = statement.pop(\"not_actions\")[0]\n if \"not_resources\" in statement:\n statement[\"NotResource\"] = statement.pop(\"not_resources\")[0]\n if \"effect\" in statement:\n statement[\"Effect\"] = statement.pop(\"effect\")[0]\n if \"effect\" not in statement and \"Effect\" not in statement:\n statement[\"Effect\"] = \"Allow\"\n return result\n", "path": "checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py"}]} | 1,105 | 270 |
gh_patches_debug_8908 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-371 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Slow loading of image features during training
For VQA tasks, it takes a massive amount of time to load the image features for training. The overhead is so large that, for the same model, it increases the training time by a factor of 50.
</issue>
<code>
[start of parlai/core/image_featurizers.py]
1 # All rights reserved.
2 # This source code is licensed under the BSD-style license found in the
3 # LICENSE file in the root directory of this source tree. An additional grant
4 # of patent rights can be found in the PATENTS file in the same directory.
5
6 import parlai.core.build_data as build_data
7
8 import os
9 import copy
10 import numpy as np
11 from PIL import Image
12
13 _greyscale = ' .,:;crsA23hHG#98&@'
14
15 class ImageLoader():
16 """Extract image feature using pretrained CNN network.
17 """
18 def __init__(self, opt):
19 self.opt = copy.deepcopy(opt)
20 self.netCNN = None
21
22 def init_cnn(self):
23 """Lazy initialization of preprocessor model in case we don't need any image preprocessing."""
24 try:
25 import torch
26 except ModuleNotFoundError:
27 raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')
28 from torch.autograd import Variable
29 import torchvision
30 import torchvision.transforms as transforms
31 import torch.nn as nn
32
33 opt = self.opt
34 self.image_size = opt['image_size']
35 self.crop_size = opt['image_cropsize']
36 self.datatype = opt['datatype']
37 self.image_mode = opt['image_mode']
38
39 opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
40 self.use_cuda = opt['cuda']
41
42 if self.use_cuda:
43 print('[ Using CUDA ]')
44 torch.cuda.set_device(opt['gpu'])
45
46 cnn_type, layer_num = self.image_mode_switcher()
47
48 # initialize the pretrained CNN using pytorch.
49 CNN = getattr(torchvision.models, cnn_type)
50
51 # cut off the additional layer.
52 self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])
53
54 # initialize the transform function using torch vision.
55 self.transform = transforms.Compose([
56 transforms.Scale(self.image_size),
57 transforms.CenterCrop(self.crop_size),
58 transforms.ToTensor(),
59 transforms.Normalize(mean=[0.485, 0.456, 0.406],
60 std=[0.229, 0.224, 0.225])
61 ])
62
63 # container for single image
64 self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)
65
66 if self.use_cuda:
67 self.cuda()
68 self.xs = self.xs.cuda()
69
70 # make self.xs variable.
71 self.xs = Variable(self.xs)
72
73 def cuda(self):
74 self.netCNN.cuda()
75
76 def save(self, feature, path):
77 np.save(path, feature)
78
79 def image_mode_switcher(self):
80 switcher = {
81 'resnet152': ['resnet152', -1],
82 'resnet101': ['resnet101', -1],
83 'resnet50': ['resnet50', -1],
84 'resnet34': ['resnet34', -1],
85 'resnet18': ['resnet18', -1],
86 'resnet152_spatial': ['resnet152', -2],
87 'resnet101_spatial': ['resnet101', -2],
88 'resnet50_spatial': ['resnet50', -2],
89 'resnet34_spatial': ['resnet34', -2],
90 'resnet18_spatial': ['resnet18', -2],
91 }
92
93 if self.image_mode not in switcher:
94 raise NotImplementedError('image preprocessing mode' +
95 '{} not supported yet'.format(self.image_mode))
96
97 return switcher.get(self.image_mode)
98
99 def extract(self, image, path):
100 # check whether initlize CNN network.
101 if not self.netCNN:
102 self.init_cnn()
103
104 self.xs.data.copy_(self.transform(image))
105 # extract the image feature
106 feature = self.netCNN(self.xs)
107 feature = feature.cpu().data.numpy()
108 # save the feature
109 self.save(feature, path)
110 return feature
111
112 def img_to_ascii(self, path):
113 im = Image.open(path)
114 im.thumbnail((60, 40), Image.BICUBIC)
115 im = im.convert('L')
116 asc = []
117 for y in range(0, im.size[1]):
118 for x in range(0, im.size[0]):
119 lum = 255 - im.getpixel((x, y))
120 asc.append(_greyscale[lum * len(_greyscale) // 256])
121 asc.append('\n')
122 return ''.join(asc)
123
124 def load(self, path):
125 opt = self.opt
126 mode = opt.get('image_mode', 'raw')
127 if mode is None or mode == 'none':
128 # don't need to load images
129 return None
130 elif mode == 'raw':
131 # raw just returns RGB values
132 return Image.open(path).convert('RGB')
133 elif mode == 'ascii':
134 # convert images to ascii ¯\_(ツ)_/¯
135 return self.img_to_ascii(path)
136 else:
137 # otherwise, looks for preprocessed version under 'mode' directory
138 prepath, imagefn = os.path.split(path)
139
140 dpath = os.path.join(prepath, mode)
141
142 if not os.path.exists(dpath):
143 build_data.make_dir(dpath)
144
145 imagefn = imagefn.split('.')[0]
146 imagefn = imagefn + '.npy'
147 new_path = os.path.join(prepath, mode, imagefn)
148
149 if not os.path.isfile(new_path):
150 return self.extract(Image.open(path).convert('RGB'), new_path)
151 else:
152 return np.load(new_path)
153
[end of parlai/core/image_featurizers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlai/core/image_featurizers.py b/parlai/core/image_featurizers.py
--- a/parlai/core/image_featurizers.py
+++ b/parlai/core/image_featurizers.py
@@ -9,6 +9,7 @@
import copy
import numpy as np
from PIL import Image
+from functools import lru_cache
_greyscale = ' .,:;crsA23hHG#98&@'
@@ -121,6 +122,7 @@
asc.append('\n')
return ''.join(asc)
+ @lru_cache(maxsize=None)
def load(self, path):
opt = self.opt
mode = opt.get('image_mode', 'raw')
| {"golden_diff": "diff --git a/parlai/core/image_featurizers.py b/parlai/core/image_featurizers.py\n--- a/parlai/core/image_featurizers.py\n+++ b/parlai/core/image_featurizers.py\n@@ -9,6 +9,7 @@\n import copy\n import numpy as np\n from PIL import Image\n+from functools import lru_cache\n \n _greyscale = ' .,:;crsA23hHG#98&@'\n \n@@ -121,6 +122,7 @@\n asc.append('\\n')\n return ''.join(asc)\n \n+ @lru_cache(maxsize=None)\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n", "issue": "Slow loading of image features during training\nFor VQA tasks, it takes massive amount of time to load the image features for training. The overhead is so much that for the same model it increases the training time by a factor of 50.\n", "before_files": [{"content": "# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport parlai.core.build_data as build_data\n\nimport os\nimport copy\nimport numpy as np\nfrom PIL import Image\n\n_greyscale = ' .,:;crsA23hHG#98&@'\n\nclass ImageLoader():\n \"\"\"Extract image feature using pretrained CNN network.\n \"\"\"\n def __init__(self, opt):\n self.opt = copy.deepcopy(opt)\n self.netCNN = None\n\n def init_cnn(self):\n \"\"\"Lazy initialization of preprocessor model in case we don't need any image preprocessing.\"\"\"\n try:\n import torch\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')\n from torch.autograd import Variable\n import torchvision\n import torchvision.transforms as transforms\n import torch.nn as nn\n\n opt = self.opt\n self.image_size = opt['image_size']\n self.crop_size = opt['image_cropsize']\n self.datatype = opt['datatype']\n self.image_mode = opt['image_mode']\n\n opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()\n self.use_cuda = opt['cuda']\n\n if self.use_cuda:\n print('[ Using CUDA ]')\n torch.cuda.set_device(opt['gpu'])\n\n cnn_type, layer_num = self.image_mode_switcher()\n\n # initialize the pretrained CNN using pytorch.\n CNN = getattr(torchvision.models, cnn_type)\n\n # cut off the additional layer.\n self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])\n\n # initialize the transform function using torch vision.\n self.transform = transforms.Compose([\n transforms.Scale(self.image_size),\n transforms.CenterCrop(self.crop_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n # container for single image\n self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)\n\n if self.use_cuda:\n self.cuda()\n self.xs = self.xs.cuda()\n\n # make self.xs variable.\n self.xs = Variable(self.xs)\n\n def cuda(self):\n self.netCNN.cuda()\n\n def save(self, feature, path):\n np.save(path, feature)\n\n def image_mode_switcher(self):\n switcher = {\n 'resnet152': ['resnet152', -1],\n 'resnet101': ['resnet101', -1],\n 'resnet50': ['resnet50', -1],\n 'resnet34': ['resnet34', -1],\n 'resnet18': ['resnet18', -1],\n 'resnet152_spatial': ['resnet152', -2],\n 'resnet101_spatial': ['resnet101', -2],\n 'resnet50_spatial': ['resnet50', -2],\n 'resnet34_spatial': ['resnet34', -2],\n 'resnet18_spatial': ['resnet18', -2],\n }\n\n if self.image_mode not in switcher:\n raise NotImplementedError('image preprocessing mode' +\n '{} not supported 
yet'.format(self.image_mode))\n\n return switcher.get(self.image_mode)\n\n def extract(self, image, path):\n # check whether initlize CNN network.\n if not self.netCNN:\n self.init_cnn()\n\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n feature = feature.cpu().data.numpy()\n # save the feature\n self.save(feature, path)\n return feature\n\n def img_to_ascii(self, path):\n im = Image.open(path)\n im.thumbnail((60, 40), Image.BICUBIC)\n im = im.convert('L')\n asc = []\n for y in range(0, im.size[1]):\n for x in range(0, im.size[0]):\n lum = 255 - im.getpixel((x, y))\n asc.append(_greyscale[lum * len(_greyscale) // 256])\n asc.append('\\n')\n return ''.join(asc)\n\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n if mode is None or mode == 'none':\n # don't need to load images\n return None\n elif mode == 'raw':\n # raw just returns RGB values\n return Image.open(path).convert('RGB')\n elif mode == 'ascii':\n # convert images to ascii \u00af\\_(\u30c4)_/\u00af\n return self.img_to_ascii(path)\n else:\n # otherwise, looks for preprocessed version under 'mode' directory\n prepath, imagefn = os.path.split(path)\n\n dpath = os.path.join(prepath, mode)\n\n if not os.path.exists(dpath):\n build_data.make_dir(dpath)\n\n imagefn = imagefn.split('.')[0]\n imagefn = imagefn + '.npy'\n new_path = os.path.join(prepath, mode, imagefn)\n\n if not os.path.isfile(new_path):\n return self.extract(Image.open(path).convert('RGB'), new_path)\n else:\n return np.load(new_path)\n", "path": "parlai/core/image_featurizers.py"}]} | 2,204 | 169 |
gh_patches_debug_35693 | rasdani/github-patches | git_diff | falconry__falcon-1987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update asgi look examples to use aioredis 2
A new major version of aioredis was released, and it has some API changes.
This is the changelog https://github.com/aio-libs/aioredis-py/blob/master/CHANGELOG.md
</issue>
<code>
[start of examples/asgilook/asgilook/config.py]
1 import os
2 import pathlib
3 import uuid
4
5 import aioredis
6
7
8 class Config:
9 DEFAULT_CONFIG_PATH = '/tmp/asgilook'
10 DEFAULT_MIN_THUMB_SIZE = 64
11 DEFAULT_REDIS_HOST = 'redis://localhost'
12 DEFAULT_REDIS_POOL = aioredis.create_redis_pool
13 DEFAULT_UUID_GENERATOR = uuid.uuid4
14
15 def __init__(self):
16 self.storage_path = pathlib.Path(
17 os.environ.get('ASGI_LOOK_STORAGE_PATH', self.DEFAULT_CONFIG_PATH)
18 )
19 self.storage_path.mkdir(parents=True, exist_ok=True)
20
21 self.create_redis_pool = Config.DEFAULT_REDIS_POOL
22 self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE
23 self.redis_host = self.DEFAULT_REDIS_HOST
24 self.uuid_generator = Config.DEFAULT_UUID_GENERATOR
25
[end of examples/asgilook/asgilook/config.py]
[start of examples/asgilook/asgilook/cache.py]
1 import msgpack
2
3
4 class RedisCache:
5 PREFIX = 'asgilook:'
6 INVALIDATE_ON = frozenset({'DELETE', 'POST', 'PUT'})
7 CACHE_HEADER = 'X-ASGILook-Cache'
8 TTL = 3600
9
10 def __init__(self, config):
11 self._config = config
12
13 # NOTE(vytas): To be initialized upon application startup (see the
14 # method below).
15 self._redis = None
16
17 async def _serialize_response(self, resp):
18 data = await resp.render_body()
19 return msgpack.packb([resp.content_type, data], use_bin_type=True)
20
21 def _deserialize_response(self, resp, data):
22 resp.content_type, resp.data = msgpack.unpackb(data, raw=False)
23 resp.complete = True
24 resp.context.cached = True
25
26 async def process_startup(self, scope, event):
27 if self._redis is None:
28 self._redis = await self._config.create_redis_pool(self._config.redis_host)
29
30 async def process_request(self, req, resp):
31 resp.context.cached = False
32
33 if req.method in self.INVALIDATE_ON:
34 return
35
36 key = f'{self.PREFIX}/{req.path}'
37 data = await self._redis.get(key)
38 if data is not None:
39 self._deserialize_response(resp, data)
40 resp.set_header(self.CACHE_HEADER, 'Hit')
41 else:
42 resp.set_header(self.CACHE_HEADER, 'Miss')
43
44 async def process_response(self, req, resp, resource, req_succeeded):
45 if not req_succeeded:
46 return
47
48 key = f'{self.PREFIX}/{req.path}'
49
50 if req.method in self.INVALIDATE_ON:
51 await self._redis.delete(key)
52 elif not resp.context.cached:
53 data = await self._serialize_response(resp)
54 await self._redis.set(key, data, expire=self.TTL)
55
[end of examples/asgilook/asgilook/cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/asgilook/asgilook/cache.py b/examples/asgilook/asgilook/cache.py
--- a/examples/asgilook/asgilook/cache.py
+++ b/examples/asgilook/asgilook/cache.py
@@ -9,10 +9,7 @@
def __init__(self, config):
self._config = config
-
- # NOTE(vytas): To be initialized upon application startup (see the
- # method below).
- self._redis = None
+ self._redis = self._config.redis_from_url(self._config.redis_host)
async def _serialize_response(self, resp):
data = await resp.render_body()
@@ -24,8 +21,10 @@
resp.context.cached = True
async def process_startup(self, scope, event):
- if self._redis is None:
- self._redis = await self._config.create_redis_pool(self._config.redis_host)
+ await self._redis.ping()
+
+ async def process_shutdown(self, scope, event):
+ await self._redis.close()
async def process_request(self, req, resp):
resp.context.cached = False
@@ -51,4 +50,4 @@
await self._redis.delete(key)
elif not resp.context.cached:
data = await self._serialize_response(resp)
- await self._redis.set(key, data, expire=self.TTL)
+ await self._redis.set(key, data, ex=self.TTL)
diff --git a/examples/asgilook/asgilook/config.py b/examples/asgilook/asgilook/config.py
--- a/examples/asgilook/asgilook/config.py
+++ b/examples/asgilook/asgilook/config.py
@@ -9,7 +9,7 @@
DEFAULT_CONFIG_PATH = '/tmp/asgilook'
DEFAULT_MIN_THUMB_SIZE = 64
DEFAULT_REDIS_HOST = 'redis://localhost'
- DEFAULT_REDIS_POOL = aioredis.create_redis_pool
+ DEFAULT_REDIS_FROM_URL = aioredis.from_url
DEFAULT_UUID_GENERATOR = uuid.uuid4
def __init__(self):
@@ -18,7 +18,7 @@
)
self.storage_path.mkdir(parents=True, exist_ok=True)
- self.create_redis_pool = Config.DEFAULT_REDIS_POOL
+ self.redis_from_url = Config.DEFAULT_REDIS_FROM_URL
self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE
self.redis_host = self.DEFAULT_REDIS_HOST
self.uuid_generator = Config.DEFAULT_UUID_GENERATOR
| {"golden_diff": "diff --git a/examples/asgilook/asgilook/cache.py b/examples/asgilook/asgilook/cache.py\n--- a/examples/asgilook/asgilook/cache.py\n+++ b/examples/asgilook/asgilook/cache.py\n@@ -9,10 +9,7 @@\n \n def __init__(self, config):\n self._config = config\n-\n- # NOTE(vytas): To be initialized upon application startup (see the\n- # method below).\n- self._redis = None\n+ self._redis = self._config.redis_from_url(self._config.redis_host)\n \n async def _serialize_response(self, resp):\n data = await resp.render_body()\n@@ -24,8 +21,10 @@\n resp.context.cached = True\n \n async def process_startup(self, scope, event):\n- if self._redis is None:\n- self._redis = await self._config.create_redis_pool(self._config.redis_host)\n+ await self._redis.ping()\n+\n+ async def process_shutdown(self, scope, event):\n+ await self._redis.close()\n \n async def process_request(self, req, resp):\n resp.context.cached = False\n@@ -51,4 +50,4 @@\n await self._redis.delete(key)\n elif not resp.context.cached:\n data = await self._serialize_response(resp)\n- await self._redis.set(key, data, expire=self.TTL)\n+ await self._redis.set(key, data, ex=self.TTL)\ndiff --git a/examples/asgilook/asgilook/config.py b/examples/asgilook/asgilook/config.py\n--- a/examples/asgilook/asgilook/config.py\n+++ b/examples/asgilook/asgilook/config.py\n@@ -9,7 +9,7 @@\n DEFAULT_CONFIG_PATH = '/tmp/asgilook'\n DEFAULT_MIN_THUMB_SIZE = 64\n DEFAULT_REDIS_HOST = 'redis://localhost'\n- DEFAULT_REDIS_POOL = aioredis.create_redis_pool\n+ DEFAULT_REDIS_FROM_URL = aioredis.from_url\n DEFAULT_UUID_GENERATOR = uuid.uuid4\n \n def __init__(self):\n@@ -18,7 +18,7 @@\n )\n self.storage_path.mkdir(parents=True, exist_ok=True)\n \n- self.create_redis_pool = Config.DEFAULT_REDIS_POOL\n+ self.redis_from_url = Config.DEFAULT_REDIS_FROM_URL\n self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE\n self.redis_host = self.DEFAULT_REDIS_HOST\n self.uuid_generator = Config.DEFAULT_UUID_GENERATOR\n", "issue": "Update asgi look examples to use aioredis 2\nA new major vesion of aioredis was released, and it has some api changes.\r\nThis is the changelog https://github.com/aio-libs/aioredis-py/blob/master/CHANGELOG.md\n", "before_files": [{"content": "import os\nimport pathlib\nimport uuid\n\nimport aioredis\n\n\nclass Config:\n DEFAULT_CONFIG_PATH = '/tmp/asgilook'\n DEFAULT_MIN_THUMB_SIZE = 64\n DEFAULT_REDIS_HOST = 'redis://localhost'\n DEFAULT_REDIS_POOL = aioredis.create_redis_pool\n DEFAULT_UUID_GENERATOR = uuid.uuid4\n\n def __init__(self):\n self.storage_path = pathlib.Path(\n os.environ.get('ASGI_LOOK_STORAGE_PATH', self.DEFAULT_CONFIG_PATH)\n )\n self.storage_path.mkdir(parents=True, exist_ok=True)\n\n self.create_redis_pool = Config.DEFAULT_REDIS_POOL\n self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE\n self.redis_host = self.DEFAULT_REDIS_HOST\n self.uuid_generator = Config.DEFAULT_UUID_GENERATOR\n", "path": "examples/asgilook/asgilook/config.py"}, {"content": "import msgpack\n\n\nclass RedisCache:\n PREFIX = 'asgilook:'\n INVALIDATE_ON = frozenset({'DELETE', 'POST', 'PUT'})\n CACHE_HEADER = 'X-ASGILook-Cache'\n TTL = 3600\n\n def __init__(self, config):\n self._config = config\n\n # NOTE(vytas): To be initialized upon application startup (see the\n # method below).\n self._redis = None\n\n async def _serialize_response(self, resp):\n data = await resp.render_body()\n return msgpack.packb([resp.content_type, data], use_bin_type=True)\n\n def _deserialize_response(self, resp, data):\n resp.content_type, resp.data = msgpack.unpackb(data, 
raw=False)\n resp.complete = True\n resp.context.cached = True\n\n async def process_startup(self, scope, event):\n if self._redis is None:\n self._redis = await self._config.create_redis_pool(self._config.redis_host)\n\n async def process_request(self, req, resp):\n resp.context.cached = False\n\n if req.method in self.INVALIDATE_ON:\n return\n\n key = f'{self.PREFIX}/{req.path}'\n data = await self._redis.get(key)\n if data is not None:\n self._deserialize_response(resp, data)\n resp.set_header(self.CACHE_HEADER, 'Hit')\n else:\n resp.set_header(self.CACHE_HEADER, 'Miss')\n\n async def process_response(self, req, resp, resource, req_succeeded):\n if not req_succeeded:\n return\n\n key = f'{self.PREFIX}/{req.path}'\n\n if req.method in self.INVALIDATE_ON:\n await self._redis.delete(key)\n elif not resp.context.cached:\n data = await self._serialize_response(resp)\n await self._redis.set(key, data, expire=self.TTL)\n", "path": "examples/asgilook/asgilook/cache.py"}]} | 1,356 | 552 |
gh_patches_debug_783 | rasdani/github-patches | git_diff | modin-project__modin-3440 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove inheritance of Modin DMatrix from xgb.DMatrix
Inheritance of Modin DMatrix from xgb.DMatrix doesn't provide any benefits. Wrong documentation is provided to the user when using the `help(modin.experimental.xgboost.DMatrix)` command.
</issue>
<code>
[start of modin/experimental/xgboost/xgboost.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """Module holds public interfaces for work Modin XGBoost."""
15
16 import logging
17 from typing import Dict, Optional
18
19 import xgboost as xgb
20
21 from modin.config import Engine
22 from modin.distributed.dataframe.pandas import unwrap_partitions
23 import modin.pandas as pd
24
25 LOGGER = logging.getLogger("[modin.xgboost]")
26
27
28 class DMatrix(xgb.DMatrix):
29 """
30 DMatrix holds references to partitions of Modin DataFrame.
31
32 On init stage unwrapping partitions of Modin DataFrame is started.
33
34 Parameters
35 ----------
36 data : modin.pandas.DataFrame
37 Data source of DMatrix.
38 label : modin.pandas.DataFrame or modin.pandas.Series
39 Labels used for training.
40
41 Notes
42 -----
43 Currently DMatrix supports only `data` and `label` parameters.
44 """
45
46 def __init__(self, data, label):
47 assert isinstance(
48 data, pd.DataFrame
49 ), f"Type of `data` is {type(data)}, but expected {pd.DataFrame}."
50 assert isinstance(
51 label, (pd.DataFrame, pd.Series)
52 ), f"Type of `data` is {type(label)}, but expected {pd.DataFrame} or {pd.Series}."
53
54 self.data = unwrap_partitions(data, axis=0, get_ip=True)
55 self.label = unwrap_partitions(label, axis=0)
56
57 self.metadata = (
58 data.index,
59 data.columns,
60 data._query_compiler._modin_frame._row_lengths,
61 )
62
63 def __iter__(self):
64 """
65 Return unwrapped `self.data` and `self.label`.
66
67 Yields
68 ------
69 list
70 List of `self.data` with pairs of references to IP of row partition
71 and row partition [(IP_ref0, partition_ref0), ..].
72 list
73 List of `self.label` with references to row partitions
74 [partition_ref0, ..].
75 """
76 yield self.data
77 yield self.label
78
79
80 class Booster(xgb.Booster):
81 """
82 A Modin Booster of XGBoost.
83
84 Booster is the model of XGBoost, that contains low level routines for
85 training, prediction and evaluation.
86
87 Parameters
88 ----------
89 params : dict, optional
90 Parameters for boosters.
91 cache : list, default: empty
92 List of cache items.
93 model_file : string/os.PathLike/xgb.Booster/bytearray, optional
94 Path to the model file if it's string or PathLike or xgb.Booster.
95 """
96
97 def __init__(self, params=None, cache=(), model_file=None): # noqa: MD01
98 super(Booster, self).__init__(params=params, cache=cache, model_file=model_file)
99
100 def predict(
101 self,
102 data: DMatrix,
103 **kwargs,
104 ):
105 """
106 Run distributed prediction with a trained booster.
107
108 During execution it runs ``xgb.predict`` on each worker for subset of `data`
109 and creates Modin DataFrame with prediction results.
110
111 Parameters
112 ----------
113 data : modin.experimental.xgboost.DMatrix
114 Input data used for prediction.
115 **kwargs : dict
116 Other parameters are the same as for ``xgboost.Booster.predict``.
117
118 Returns
119 -------
120 modin.pandas.DataFrame
121 Modin DataFrame with prediction results.
122 """
123 LOGGER.info("Prediction started")
124
125 if Engine.get() == "Ray":
126 from .xgboost_ray import _predict
127 else:
128 raise ValueError("Current version supports only Ray engine.")
129
130 assert isinstance(
131 data, DMatrix
132 ), f"Type of `data` is {type(data)}, but expected {DMatrix}."
133
134 result = _predict(self.copy(), data, **kwargs)
135 LOGGER.info("Prediction finished")
136
137 return result
138
139
140 def train(
141 params: Dict,
142 dtrain: DMatrix,
143 *args,
144 evals=(),
145 num_actors: Optional[int] = None,
146 evals_result: Optional[Dict] = None,
147 **kwargs,
148 ):
149 """
150 Run distributed training of XGBoost model.
151
152 During work it evenly distributes `dtrain` between workers according
153 to IP addresses partitions (in case of not even distribution of `dtrain`
154 over nodes, some partitions will be re-distributed between nodes),
155 runs xgb.train on each worker for subset of `dtrain` and reduces training results
156 of each worker using Rabit Context.
157
158 Parameters
159 ----------
160 params : dict
161 Booster params.
162 dtrain : modin.experimental.xgboost.DMatrix
163 Data to be trained against.
164 *args : iterable
165 Other parameters for `xgboost.train`.
166 evals : list of pairs (modin.experimental.xgboost.DMatrix, str), default: empty
167 List of validation sets for which metrics will evaluated during training.
168 Validation metrics will help us track the performance of the model.
169 num_actors : int, optional
170 Number of actors for training. If unspecified, this value will be
171 computed automatically.
172 evals_result : dict, optional
173 Dict to store evaluation results in.
174 **kwargs : dict
175 Other parameters are the same as `xgboost.train`.
176
177 Returns
178 -------
179 modin.experimental.xgboost.Booster
180 A trained booster.
181 """
182 LOGGER.info("Training started")
183
184 if Engine.get() == "Ray":
185 from .xgboost_ray import _train
186 else:
187 raise ValueError("Current version supports only Ray engine.")
188
189 assert isinstance(
190 dtrain, DMatrix
191 ), f"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}."
192 result = _train(dtrain, params, *args, num_actors=num_actors, evals=evals, **kwargs)
193 if isinstance(evals_result, dict):
194 evals_result.update(result["history"])
195
196 LOGGER.info("Training finished")
197 return Booster(model_file=result["booster"])
198
[end of modin/experimental/xgboost/xgboost.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/experimental/xgboost/xgboost.py b/modin/experimental/xgboost/xgboost.py
--- a/modin/experimental/xgboost/xgboost.py
+++ b/modin/experimental/xgboost/xgboost.py
@@ -25,7 +25,7 @@
LOGGER = logging.getLogger("[modin.xgboost]")
-class DMatrix(xgb.DMatrix):
+class DMatrix:
"""
DMatrix holds references to partitions of Modin DataFrame.
| {"golden_diff": "diff --git a/modin/experimental/xgboost/xgboost.py b/modin/experimental/xgboost/xgboost.py\n--- a/modin/experimental/xgboost/xgboost.py\n+++ b/modin/experimental/xgboost/xgboost.py\n@@ -25,7 +25,7 @@\n LOGGER = logging.getLogger(\"[modin.xgboost]\")\n \n \n-class DMatrix(xgb.DMatrix):\n+class DMatrix:\n \"\"\"\n DMatrix holds references to partitions of Modin DataFrame.\n", "issue": "Remove inheritance of Modin DMatrix from xgb.DMatrix\nInheritance of Modin DMatrix from xgb.DMatrix doesn't include any benefits. Wrong documentation is provided to user using `help(modin.experimtenal.xgboost.DMatrix)` command.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module holds public interfaces for work Modin XGBoost.\"\"\"\n\nimport logging\nfrom typing import Dict, Optional\n\nimport xgboost as xgb\n\nfrom modin.config import Engine\nfrom modin.distributed.dataframe.pandas import unwrap_partitions\nimport modin.pandas as pd\n\nLOGGER = logging.getLogger(\"[modin.xgboost]\")\n\n\nclass DMatrix(xgb.DMatrix):\n \"\"\"\n DMatrix holds references to partitions of Modin DataFrame.\n\n On init stage unwrapping partitions of Modin DataFrame is started.\n\n Parameters\n ----------\n data : modin.pandas.DataFrame\n Data source of DMatrix.\n label : modin.pandas.DataFrame or modin.pandas.Series\n Labels used for training.\n\n Notes\n -----\n Currently DMatrix supports only `data` and `label` parameters.\n \"\"\"\n\n def __init__(self, data, label):\n assert isinstance(\n data, pd.DataFrame\n ), f\"Type of `data` is {type(data)}, but expected {pd.DataFrame}.\"\n assert isinstance(\n label, (pd.DataFrame, pd.Series)\n ), f\"Type of `data` is {type(label)}, but expected {pd.DataFrame} or {pd.Series}.\"\n\n self.data = unwrap_partitions(data, axis=0, get_ip=True)\n self.label = unwrap_partitions(label, axis=0)\n\n self.metadata = (\n data.index,\n data.columns,\n data._query_compiler._modin_frame._row_lengths,\n )\n\n def __iter__(self):\n \"\"\"\n Return unwrapped `self.data` and `self.label`.\n\n Yields\n ------\n list\n List of `self.data` with pairs of references to IP of row partition\n and row partition [(IP_ref0, partition_ref0), ..].\n list\n List of `self.label` with references to row partitions\n [partition_ref0, ..].\n \"\"\"\n yield self.data\n yield self.label\n\n\nclass Booster(xgb.Booster):\n \"\"\"\n A Modin Booster of XGBoost.\n\n Booster is the model of XGBoost, that contains low level routines for\n training, prediction and evaluation.\n\n Parameters\n ----------\n params : dict, optional\n Parameters for boosters.\n cache : list, default: empty\n List of cache items.\n model_file : string/os.PathLike/xgb.Booster/bytearray, optional\n Path to the model file if it's string or PathLike or xgb.Booster.\n 
\"\"\"\n\n def __init__(self, params=None, cache=(), model_file=None): # noqa: MD01\n super(Booster, self).__init__(params=params, cache=cache, model_file=model_file)\n\n def predict(\n self,\n data: DMatrix,\n **kwargs,\n ):\n \"\"\"\n Run distributed prediction with a trained booster.\n\n During execution it runs ``xgb.predict`` on each worker for subset of `data`\n and creates Modin DataFrame with prediction results.\n\n Parameters\n ----------\n data : modin.experimental.xgboost.DMatrix\n Input data used for prediction.\n **kwargs : dict\n Other parameters are the same as for ``xgboost.Booster.predict``.\n\n Returns\n -------\n modin.pandas.DataFrame\n Modin DataFrame with prediction results.\n \"\"\"\n LOGGER.info(\"Prediction started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _predict\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n data, DMatrix\n ), f\"Type of `data` is {type(data)}, but expected {DMatrix}.\"\n\n result = _predict(self.copy(), data, **kwargs)\n LOGGER.info(\"Prediction finished\")\n\n return result\n\n\ndef train(\n params: Dict,\n dtrain: DMatrix,\n *args,\n evals=(),\n num_actors: Optional[int] = None,\n evals_result: Optional[Dict] = None,\n **kwargs,\n):\n \"\"\"\n Run distributed training of XGBoost model.\n\n During work it evenly distributes `dtrain` between workers according\n to IP addresses partitions (in case of not even distribution of `dtrain`\n over nodes, some partitions will be re-distributed between nodes),\n runs xgb.train on each worker for subset of `dtrain` and reduces training results\n of each worker using Rabit Context.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : modin.experimental.xgboost.DMatrix\n Data to be trained against.\n *args : iterable\n Other parameters for `xgboost.train`.\n evals : list of pairs (modin.experimental.xgboost.DMatrix, str), default: empty\n List of validation sets for which metrics will evaluated during training.\n Validation metrics will help us track the performance of the model.\n num_actors : int, optional\n Number of actors for training. If unspecified, this value will be\n computed automatically.\n evals_result : dict, optional\n Dict to store evaluation results in.\n **kwargs : dict\n Other parameters are the same as `xgboost.train`.\n\n Returns\n -------\n modin.experimental.xgboost.Booster\n A trained booster.\n \"\"\"\n LOGGER.info(\"Training started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _train\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n dtrain, DMatrix\n ), f\"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}.\"\n result = _train(dtrain, params, *args, num_actors=num_actors, evals=evals, **kwargs)\n if isinstance(evals_result, dict):\n evals_result.update(result[\"history\"])\n\n LOGGER.info(\"Training finished\")\n return Booster(model_file=result[\"booster\"])\n", "path": "modin/experimental/xgboost/xgboost.py"}]} | 2,550 | 107 |
gh_patches_debug_8028 | rasdani/github-patches | git_diff | e-valuation__EvaP-848 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Username case sensitivity
Usernames are case sensitive. The importer makes all usernames lowercase, but accounts that are created automatically when logging in with Kerberos authentication can have uppercase letters.
This can lead to two users having the same username, and then the system crashes on login.
Automatically created accounts should also get lowercase usernames, even if the user enters the name differently.
</issue>
<code>
[start of evap/evaluation/forms.py]
1 from django import forms
2 from django.contrib.auth import authenticate
3 from django.utils.translation import ugettext_lazy as _
4 from django.views.decorators.debug import sensitive_variables
5
6 from evap.evaluation.models import UserProfile
7
8
9 class LoginUsernameForm(forms.Form):
10 """Form encapsulating the login with username and password, for example from an Active Directory.
11 """
12
13 username = forms.CharField(label=_("Username"), max_length=254)
14 password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
15
16 def __init__(self, request=None, *args, **kwargs):
17 """
18 If request is passed in, the form will validate that cookies are
19 enabled. Note that the request (a HttpRequest object) must have set a
20 cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
21 running this validation.
22 """
23 self.request = request
24 self.user_cache = None
25 super().__init__(*args, **kwargs)
26
27 @sensitive_variables('password')
28 def clean_password(self):
29 username = self.cleaned_data.get('username')
30 password = self.cleaned_data.get('password')
31
32 if username and password:
33 self.user_cache = authenticate(username=username, password=password)
34 if self.user_cache is None:
35 raise forms.ValidationError(_("Please enter a correct username and password."))
36 self.check_for_test_cookie()
37 return password
38
39 def check_for_test_cookie(self):
40 if self.request and not self.request.session.test_cookie_worked():
41 raise forms.ValidationError(_("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."))
42
43 def get_user_id(self):
44 if self.user_cache:
45 return self.user_cache.id
46 return None
47
48 def get_user(self):
49 return self.user_cache
50
51
52 class NewKeyForm(forms.Form):
53 email = forms.EmailField(label=_("Email address"))
54
55 def __init__(self, *args, **kwargs):
56 self.user_cache = None
57
58 super().__init__(*args, **kwargs)
59
60 def clean_email(self):
61 email = self.cleaned_data.get('email')
62
63 if not UserProfile.email_needs_login_key(email):
64 raise forms.ValidationError(_("HPI users cannot request login keys. Please login using your domain credentials."))
65
66 try:
67 user = UserProfile.objects.get(email__iexact=email)
68 self.user_cache = user
69 except UserProfile.DoesNotExist:
70 raise forms.ValidationError(_("No user with this email address was found. Please make sure to enter the email address already known to the university office."))
71
72 return email
73
74 def get_user(self):
75 return self.user_cache
76
[end of evap/evaluation/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/forms.py b/evap/evaluation/forms.py
--- a/evap/evaluation/forms.py
+++ b/evap/evaluation/forms.py
@@ -29,6 +29,9 @@
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
+ # django-auth-kerberos might create a new userprofile. make sure it gets a lowercase username.
+ username = username.lower()
+
if username and password:
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
| {"golden_diff": "diff --git a/evap/evaluation/forms.py b/evap/evaluation/forms.py\n--- a/evap/evaluation/forms.py\n+++ b/evap/evaluation/forms.py\n@@ -29,6 +29,9 @@\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n \n+ # django-auth-kerberos might create a new userprofile. make sure it gets a lowercase username.\n+ username = username.lower()\n+\n if username and password:\n self.user_cache = authenticate(username=username, password=password)\n if self.user_cache is None:\n", "issue": "Username case sensitivity\nUsernames are case sensitive. The importer makes all usernames lowercase, but automatically created accounts when logging in with Kerberos authentification can have uppercase letters.\nThis can lead to two users having the same username and then the system crashed on login.\n\nAutomatically created accounts should also get lowercase usernames, even if the user enters the name differently.\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import authenticate\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.debug import sensitive_variables\n\nfrom evap.evaluation.models import UserProfile\n\n\nclass LoginUsernameForm(forms.Form):\n \"\"\"Form encapsulating the login with username and password, for example from an Active Directory.\n \"\"\"\n\n username = forms.CharField(label=_(\"Username\"), max_length=254)\n password = forms.CharField(label=_(\"Password\"), widget=forms.PasswordInput)\n\n def __init__(self, request=None, *args, **kwargs):\n \"\"\"\n If request is passed in, the form will validate that cookies are\n enabled. Note that the request (a HttpRequest object) must have set a\n cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before\n running this validation.\n \"\"\"\n self.request = request\n self.user_cache = None\n super().__init__(*args, **kwargs)\n\n @sensitive_variables('password')\n def clean_password(self):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n\n if username and password:\n self.user_cache = authenticate(username=username, password=password)\n if self.user_cache is None:\n raise forms.ValidationError(_(\"Please enter a correct username and password.\"))\n self.check_for_test_cookie()\n return password\n\n def check_for_test_cookie(self):\n if self.request and not self.request.session.test_cookie_worked():\n raise forms.ValidationError(_(\"Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in.\"))\n\n def get_user_id(self):\n if self.user_cache:\n return self.user_cache.id\n return None\n\n def get_user(self):\n return self.user_cache\n\n\nclass NewKeyForm(forms.Form):\n email = forms.EmailField(label=_(\"Email address\"))\n\n def __init__(self, *args, **kwargs):\n self.user_cache = None\n\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n\n if not UserProfile.email_needs_login_key(email):\n raise forms.ValidationError(_(\"HPI users cannot request login keys. Please login using your domain credentials.\"))\n\n try:\n user = UserProfile.objects.get(email__iexact=email)\n self.user_cache = user\n except UserProfile.DoesNotExist:\n raise forms.ValidationError(_(\"No user with this email address was found. 
Please make sure to enter the email address already known to the university office.\"))\n\n return email\n\n def get_user(self):\n return self.user_cache\n", "path": "evap/evaluation/forms.py"}]} | 1,300 | 130 |
gh_patches_debug_211 | rasdani/github-patches | git_diff | mdn__kuma-6489 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't browse users in django admin now that tags are gone
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/7273070/
```
Resolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None 
(?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...
File "redirect_urls/middleware.py", line 14, in __call__
resolver_match = self.resolver.resolve(request.path_info)
File "newrelic/hooks/framework_django.py", line 600, in wrapper
return _wrapped(*args, **kwargs)
File "newrelic/hooks/framework_django.py", line 588, in _wrapped
result = wrapped(path)
File "newrelic/hooks/framework_django.py", line 575, in wrapper
return wrapped(*args, **kwargs)
File "django/urls/resolvers.py", line 394, in resolve
raise Resolver404({'tried': tried, 'path': new_path})
FieldError: Cannot resolve keyword 'tags' into field. Choices are: auth_token, bans, bans_issued, bio, created_attachment_revisions, created_revisions, created_toolbars, date_joined, discourse_url, documentattachment, documentdeletionlog, documentspam_reviewed, documentspamattempt, email, emailaddress, facebook_url, first_name, flag, fullname, github_url, groups, homepage, id, irc_nickname, is_active, is_github_url_public, is_newsletter_subscribed, is_staff, is_superuser, key, last_login, last_name, linkedin_url, locale, location, logentry, mozillians_url, organization, password, revisionakismetsubmission, socialaccount, stackoverflow_url, stripe_customer_id, timezone, title, twitter_url, user_permissions, username, watch, website_url
(18 additional frame(s) were not displayed)
...
File "django/db/models/sql/query.py", line 1268, in _add_q
child_clause, needed_inner = self._add_q(
File "django/db/models/sql/query.py", line 1273, in _add_q
child_clause, needed_inner = self.build_filter(
File "django/db/models/sql/query.py", line 1154, in build_filter
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
File "django/db/models/sql/query.py", line 1034, in solve_lookup_type
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
File "django/db/models/sql/query.py", line 1351, in names_to_path
raise FieldError("Cannot resolve keyword '%s' into field. "
FieldError: Cannot resolve keyword 'tags' into field. Choices are: auth_token, bans, bans_issued, bio, created_attachment_revisions, created_revisions, created_toolbars, date_joined, discourse_url, documentattachment, documentdeletionlog, documentspam_reviewed, documentspamattempt, email, emailaddress, facebook_url, first_name, flag, fullname, github_url, groups, homepage, id, irc_nickname, is_active, is_github_url_public, is_newsletter_subscribed, is_staff, is_superuser, key, last_login, last_name, linkedin_url, locale, location, logentry, mozillians_url, organization, password, revisionakismetsubmission, socialaccount, stackoverflow_url, stripe_customer_id, timezone, title, twitter_url, user_permissions, username, watch, website_url
```
</issue>
<code>
[start of kuma/users/admin.py]
1 from django.contrib import admin
2 from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
3 from django.utils.html import format_html
4
5 from kuma.core.urlresolvers import reverse
6 from kuma.core.utils import urlparams
7
8 from .models import User, UserBan
9
10
11 @admin.register(UserBan)
12 class UserBanAdmin(admin.ModelAdmin):
13 fields = ("user", "by", "reason", "is_active")
14 list_display = ("user", "by", "reason", "is_active")
15 list_editable = ("is_active",)
16 list_filter = ("is_active",)
17 raw_id_fields = ("user", "by")
18 search_fields = ("user__username", "reason", "by__username")
19
20
21 @admin.register(User)
22 class UserAdmin(BaseUserAdmin):
23 """
24 Extends the admin view of users to show date_joined field
25 add a filter on the field too
26 """
27
28 list_display = (
29 "username",
30 "fullname",
31 "email",
32 "revisions",
33 "date_joined",
34 "is_staff",
35 "is_active",
36 )
37 list_filter = ("is_staff", "is_superuser", "is_active", "date_joined", "groups")
38 ordering = ("-date_joined",)
39 search_fields = (
40 "username",
41 "title",
42 "fullname",
43 "organization",
44 "location",
45 "email",
46 "tags__name",
47 )
48
49 def revisions(self, obj):
50 """HTML link to user's revisions with count"""
51 link = urlparams(reverse("dashboards.revisions"), user=obj.username)
52 count = obj.created_revisions.count()
53 return format_html('<a href="{}"><strong>{}</strong></a>', link, count)
54
[end of kuma/users/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/users/admin.py b/kuma/users/admin.py
--- a/kuma/users/admin.py
+++ b/kuma/users/admin.py
@@ -43,7 +43,6 @@
"organization",
"location",
"email",
- "tags__name",
)
def revisions(self, obj):
| {"golden_diff": "diff --git a/kuma/users/admin.py b/kuma/users/admin.py\n--- a/kuma/users/admin.py\n+++ b/kuma/users/admin.py\n@@ -43,7 +43,6 @@\n \"organization\",\n \"location\",\n \"email\",\n- \"tags__name\",\n )\n \n def revisions(self, obj):\n", "issue": "Can't browse users in django admin now that tags are gone\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/7273070/\n\n```\nResolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], 
[<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...\n File \"redirect_urls/middleware.py\", line 14, in __call__\n resolver_match = self.resolver.resolve(request.path_info)\n File \"newrelic/hooks/framework_django.py\", line 600, in wrapper\n return _wrapped(*args, **kwargs)\n File \"newrelic/hooks/framework_django.py\", line 588, in _wrapped\n result = wrapped(path)\n File \"newrelic/hooks/framework_django.py\", line 575, in wrapper\n return wrapped(*args, **kwargs)\n File \"django/urls/resolvers.py\", line 394, in resolve\n raise Resolver404({'tried': tried, 'path': new_path})\n\nFieldError: Cannot resolve keyword 'tags' into field. Choices are: auth_token, bans, bans_issued, bio, created_attachment_revisions, created_revisions, created_toolbars, date_joined, discourse_url, documentattachment, documentdeletionlog, documentspam_reviewed, documentspamattempt, email, emailaddress, facebook_url, first_name, flag, fullname, github_url, groups, homepage, id, irc_nickname, is_active, is_github_url_public, is_newsletter_subscribed, is_staff, is_superuser, key, last_login, last_name, linkedin_url, locale, location, logentry, mozillians_url, organization, password, revisionakismetsubmission, socialaccount, stackoverflow_url, stripe_customer_id, timezone, title, twitter_url, user_permissions, username, watch, website_url\n(18 additional frame(s) were not displayed)\n...\n File \"django/db/models/sql/query.py\", line 1268, in _add_q\n child_clause, needed_inner = self._add_q(\n File \"django/db/models/sql/query.py\", line 1273, in _add_q\n child_clause, needed_inner = self.build_filter(\n File \"django/db/models/sql/query.py\", line 1154, in build_filter\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\n File \"django/db/models/sql/query.py\", line 1034, in solve_lookup_type\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())\n File \"django/db/models/sql/query.py\", line 1351, in names_to_path\n raise FieldError(\"Cannot resolve keyword '%s' into field. \"\n\nFieldError: Cannot resolve keyword 'tags' into field. 
Choices are: auth_token, bans, bans_issued, bio, created_attachment_revisions, created_revisions, created_toolbars, date_joined, discourse_url, documentattachment, documentdeletionlog, documentspam_reviewed, documentspamattempt, email, emailaddress, facebook_url, first_name, flag, fullname, github_url, groups, homepage, id, irc_nickname, is_active, is_github_url_public, is_newsletter_subscribed, is_staff, is_superuser, key, last_login, last_name, linkedin_url, locale, location, logentry, mozillians_url, organization, password, revisionakismetsubmission, socialaccount, stackoverflow_url, stripe_customer_id, timezone, title, twitter_url, user_permissions, username, watch, website_url\n```\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.utils.html import format_html\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import urlparams\n\nfrom .models import User, UserBan\n\n\[email protected](UserBan)\nclass UserBanAdmin(admin.ModelAdmin):\n fields = (\"user\", \"by\", \"reason\", \"is_active\")\n list_display = (\"user\", \"by\", \"reason\", \"is_active\")\n list_editable = (\"is_active\",)\n list_filter = (\"is_active\",)\n raw_id_fields = (\"user\", \"by\")\n search_fields = (\"user__username\", \"reason\", \"by__username\")\n\n\[email protected](User)\nclass UserAdmin(BaseUserAdmin):\n \"\"\"\n Extends the admin view of users to show date_joined field\n add a filter on the field too\n \"\"\"\n\n list_display = (\n \"username\",\n \"fullname\",\n \"email\",\n \"revisions\",\n \"date_joined\",\n \"is_staff\",\n \"is_active\",\n )\n list_filter = (\"is_staff\", \"is_superuser\", \"is_active\", \"date_joined\", \"groups\")\n ordering = (\"-date_joined\",)\n search_fields = (\n \"username\",\n \"title\",\n \"fullname\",\n \"organization\",\n \"location\",\n \"email\",\n \"tags__name\",\n )\n\n def revisions(self, obj):\n \"\"\"HTML link to user's revisions with count\"\"\"\n link = urlparams(reverse(\"dashboards.revisions\"), user=obj.username)\n count = obj.created_revisions.count()\n return format_html('<a href=\"{}\"><strong>{}</strong></a>', link, count)\n", "path": "kuma/users/admin.py"}]} | 2,985 | 73 |
gh_patches_debug_6806 | rasdani/github-patches | git_diff | Pylons__pyramid-2674 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
reify docstring doesn't render properly
The testsetup section is omitted from the rendered docs at http://pyramid.readthedocs.io/en/latest/api/decorator.html#pyramid.decorator.reify
Unfortunately this section is essential for understanding the example.
</issue>
<code>
[start of pyramid/decorator.py]
1 from functools import update_wrapper
2
3
4 class reify(object):
5 """ Use as a class method decorator. It operates almost exactly like the
6 Python ``@property`` decorator, but it puts the result of the method it
7 decorates into the instance dict after the first call, effectively
8 replacing the function it decorates with an instance variable. It is, in
9 Python parlance, a non-data descriptor. An example:
10
11 .. testsetup::
12
13 from pyramid.decorator import reify
14
15 class Foo(object):
16 @reify
17 def jammy(self):
18 print('jammy called')
19 return 1
20
21 And usage of Foo:
22
23 .. doctest::
24
25 >>> f = Foo()
26 >>> v = f.jammy
27 jammy called
28 >>> print(v)
29 1
30 >>> f.jammy
31 1
32 >>> # jammy func not called the second time; it replaced itself with 1
33 >>> # Note: reassignment is possible
34 >>> f.jammy = 2
35 >>> f.jammy
36 2
37 """
38 def __init__(self, wrapped):
39 self.wrapped = wrapped
40 update_wrapper(self, wrapped)
41
42 def __get__(self, inst, objtype=None):
43 if inst is None:
44 return self
45 val = self.wrapped(inst)
46 setattr(inst, self.wrapped.__name__, val)
47 return val
48
49
[end of pyramid/decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyramid/decorator.py b/pyramid/decorator.py
--- a/pyramid/decorator.py
+++ b/pyramid/decorator.py
@@ -8,6 +8,16 @@
replacing the function it decorates with an instance variable. It is, in
Python parlance, a non-data descriptor. An example:
+ .. code-block:: python
+
+ from pyramid.decorator import reify
+
+ class Foo(object):
+ @reify
+ def jammy(self):
+ print('jammy called')
+ return 1
+
.. testsetup::
from pyramid.decorator import reify
| {"golden_diff": "diff --git a/pyramid/decorator.py b/pyramid/decorator.py\n--- a/pyramid/decorator.py\n+++ b/pyramid/decorator.py\n@@ -8,6 +8,16 @@\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n \n+ .. code-block:: python\n+\n+ from pyramid.decorator import reify\n+\n+ class Foo(object):\n+ @reify\n+ def jammy(self):\n+ print('jammy called')\n+ return 1\n+\n .. testsetup::\n \n from pyramid.decorator import reify\n", "issue": "reify docstring doesn't render properly\nThe testsetup section is omitted from the rendered docs at http://pyramid.readthedocs.io/en/latest/api/decorator.html#pyramid.decorator.reify\n\nUnfortunately this section is essential for understanding the example.\n\n", "before_files": [{"content": "from functools import update_wrapper\n\n\nclass reify(object):\n \"\"\" Use as a class method decorator. It operates almost exactly like the\n Python ``@property`` decorator, but it puts the result of the method it\n decorates into the instance dict after the first call, effectively\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n\n .. testsetup::\n\n from pyramid.decorator import reify\n\n class Foo(object):\n @reify\n def jammy(self):\n print('jammy called')\n return 1\n\n And usage of Foo:\n\n .. doctest::\n\n >>> f = Foo()\n >>> v = f.jammy\n jammy called\n >>> print(v)\n 1\n >>> f.jammy\n 1\n >>> # jammy func not called the second time; it replaced itself with 1\n >>> # Note: reassignment is possible\n >>> f.jammy = 2\n >>> f.jammy\n 2\n \"\"\"\n def __init__(self, wrapped):\n self.wrapped = wrapped\n update_wrapper(self, wrapped)\n\n def __get__(self, inst, objtype=None):\n if inst is None:\n return self\n val = self.wrapped(inst)\n setattr(inst, self.wrapped.__name__, val)\n return val\n\n", "path": "pyramid/decorator.py"}]} | 994 | 149 |
gh_patches_debug_5736 | rasdani/github-patches | git_diff | google__fuzzbench-630 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make issues: debug is broken, run is impossible to control-C out of
make debug- is broken. `make debug-libfuzzer-zlib_zlib_uncompress_fuzzer` fails with this error:
```
docker run \
--cpus=1 \
--cap-add SYS_NICE \
--cap-add SYS_PTRACE \
-e FUZZ_OUTSIDE_EXPERIMENT=1 \
-e FORCE_LOCAL=1 \
-e TRIAL_ID=1 \
-e FUZZER=libfuzzer \
-e BENCHMARK=zlib_zlib_uncompress_fuzzer \
-e FUZZ_TARGET=zlib_uncompress_fuzzer \
-entrypoint "/bin/bash" \
-it gcr.io/fuzzbench/runners/libfuzzer/zlib_zlib_uncompress_fuzzer
docker: invalid reference format.
See 'docker run --help'.
make: *** [docker/generated.mk:26568: debug-libfuzzer-zlib_zlib_uncompress_fuzzer] Error 125
```
make `run-libfuzzer-zlib_zlib_uncompress_fuzzer` runs forever and cannot be Ctrl-Ced out of.
</issue>
<code>
[start of docker/generate_makefile.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Simple generator for local Makefile rules."""
15
16 import os
17
18 from common import yaml_utils
19 from common import benchmark_utils
20 from common import fuzzer_utils
21 from experiment.build import docker_images
22
23 BASE_TAG = "gcr.io/fuzzbench"
24 BENCHMARK_DIR = benchmark_utils.BENCHMARKS_DIR
25
26
27 def _print_benchmark_fuzz_target(benchmarks):
28 """Prints benchmark variables from benchmark.yaml files."""
29 for benchmark in benchmarks:
30 benchmark_vars = yaml_utils.read(
31 os.path.join(BENCHMARK_DIR, benchmark, 'benchmark.yaml'))
32 print(benchmark + '-fuzz-target=' + benchmark_vars['fuzz_target'])
33 print()
34
35
36 def _print_makefile_run_template(image):
37 fuzzer, benchmark = image['tag'].split('/')[1:]
38
39 for run_type in ('run', 'debug', 'test-run'):
40 print(('{run_type}-{fuzzer}-{benchmark}: ' +
41 '.{fuzzer}-{benchmark}-runner').format(run_type=run_type,
42 benchmark=benchmark,
43 fuzzer=fuzzer))
44
45 print('\
46 \tdocker run \\\n\
47 \t--cpus=1 \\\n\
48 \t--cap-add SYS_NICE \\\n\
49 \t--cap-add SYS_PTRACE \\\n\
50 \t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\n\
51 \t-e FORCE_LOCAL=1 \\\n\
52 \t-e TRIAL_ID=1 \\\n\
53 \t-e FUZZER={fuzzer} \\\n\
54 \t-e BENCHMARK={benchmark} \\\n\
55 \t-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\
56 '.format(fuzzer=fuzzer, benchmark=benchmark))
57
58 if run_type == 'test-run':
59 print('\t-e MAX_TOTAL_TIME=20 \\\n\t-e SNAPSHOT_PERIOD=10 \\')
60 if run_type == 'debug':
61 print('\t--entrypoint "/bin/bash" \\\n\t-it ', end='')
62 else:
63 print('\t', end='')
64
65 print(os.path.join(BASE_TAG, image['tag']))
66 print()
67
68
69 # TODO(tanq16): Add unit test.
70 def _print_rules_for_image(name, image):
71 """Print makefile section for given image to stdout."""
72 if not ('base' in name or 'dispatcher' in name):
73 print('.', end='')
74 print(name + ':', end='')
75 if 'depends_on' in image:
76 for dep in image['depends_on']:
77 if 'base' in dep:
78 print(' ' + dep, end='')
79 else:
80 print(' .' + dep, end='')
81 print()
82 print('\tdocker build \\')
83 print('\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\')
84 print('\t--build-arg BUILDKIT_INLINE_CACHE=1 \\')
85 print('\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) + ' \\')
86 if 'build_arg' in image:
87 for arg in image['build_arg']:
88 print('\t--build-arg ' + arg + ' \\')
89 if 'dockerfile' in image:
90 print('\t--file ' + image['dockerfile'] + ' \\')
91 print('\t' + image['context'])
92 print()
93
94 # Print run, debug, test-run rules if image is a runner.
95 if 'runner' in name and not ('intermediate' in name or 'base' in name):
96 _print_makefile_run_template(image)
97
98
99 def main():
100 """Generates Makefile with docker image build rules."""
101 fuzzers = fuzzer_utils.get_fuzzer_names()
102 benchmarks = benchmark_utils.get_all_benchmarks()
103 buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)
104
105 print('export DOCKER_BUILDKIT := 1')
106
107 # Print oss-fuzz benchmarks property variables.
108 _print_benchmark_fuzz_target(benchmarks)
109
110 for name, image in buildable_images.items():
111 _print_rules_for_image(name, image)
112
113 # Print build targets for all fuzzer-benchmark pairs (including coverage).
114 fuzzers.append('coverage')
115 for fuzzer in fuzzers:
116 image_type = "runner"
117 if 'coverage' in fuzzer:
118 image_type = "builder"
119 for benchmark in benchmarks:
120 print(('build-{fuzzer}-{benchmark}: ' +
121 '.{fuzzer}-{benchmark}-{image_type}\n').format(
122 fuzzer=fuzzer,
123 benchmark=benchmark,
124 image_type=image_type))
125 print()
126
127 # Print fuzzer-all benchmarks build targets.
128 for fuzzer in fuzzers:
129 all_build_targets = ' '.join([
130 'build-{0}-{1}'.format(fuzzer, benchmark)
131 for benchmark in benchmarks
132 ])
133 print('build-{fuzzer}-all: {all_targets}'.format(
134 fuzzer=fuzzer, all_targets=all_build_targets))
135
136 # Print all targets build target.
137 all_build_targets = ' '.join(
138 ['build-{0}-all'.format(name) for name in fuzzers])
139 print('build-all: {all_targets}'.format(all_targets=all_build_targets))
140
141
142 if __name__ == '__main__':
143 main()
144
[end of docker/generate_makefile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/generate_makefile.py b/docker/generate_makefile.py
--- a/docker/generate_makefile.py
+++ b/docker/generate_makefile.py
@@ -59,6 +59,8 @@
print('\t-e MAX_TOTAL_TIME=20 \\\n\t-e SNAPSHOT_PERIOD=10 \\')
if run_type == 'debug':
print('\t--entrypoint "/bin/bash" \\\n\t-it ', end='')
+ elif run_type == 'run':
+ print('\t-it ', end='')
else:
print('\t', end='')
| {"golden_diff": "diff --git a/docker/generate_makefile.py b/docker/generate_makefile.py\n--- a/docker/generate_makefile.py\n+++ b/docker/generate_makefile.py\n@@ -59,6 +59,8 @@\n print('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\n if run_type == 'debug':\n print('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n+ elif run_type == 'run':\n+ print('\\t-it ', end='')\n else:\n print('\\t', end='')\n", "issue": "Make issues: debug is broken, run is impossible to control-C out of\nmake debug- is broken. `make debug-libfuzzer-zlib_zlib_uncompress_fuzzer` fails with this error:\r\n```\r\ndocker run \\ \r\n--cpus=1 \\ \r\n--cap-add SYS_NICE \\ \r\n--cap-add SYS_PTRACE \\ \r\n-e FUZZ_OUTSIDE_EXPERIMENT=1 \\ \r\n-e FORCE_LOCAL=1 \\ \r\n-e TRIAL_ID=1 \\ \r\n-e FUZZER=libfuzzer \\ \r\n-e BENCHMARK=zlib_zlib_uncompress_fuzzer \\ \r\n-e FUZZ_TARGET=zlib_uncompress_fuzzer \\ \r\n-entrypoint \"/bin/bash\" \\ \r\n-it gcr.io/fuzzbench/runners/libfuzzer/zlib_zlib_uncompress_fuzzer \r\ndocker: invalid reference format. \r\nSee 'docker run --help'. \r\nmake: *** [docker/generated.mk:26568: debug-libfuzzer-zlib_zlib_uncompress_fuzzer] Error 125\r\n```\r\n\r\nmake `run-libfuzzer-zlib_zlib_uncompress_fuzzer` runs forever and cannot be Ctrl-Ced out of. \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Simple generator for local Makefile rules.\"\"\"\n\nimport os\n\nfrom common import yaml_utils\nfrom common import benchmark_utils\nfrom common import fuzzer_utils\nfrom experiment.build import docker_images\n\nBASE_TAG = \"gcr.io/fuzzbench\"\nBENCHMARK_DIR = benchmark_utils.BENCHMARKS_DIR\n\n\ndef _print_benchmark_fuzz_target(benchmarks):\n \"\"\"Prints benchmark variables from benchmark.yaml files.\"\"\"\n for benchmark in benchmarks:\n benchmark_vars = yaml_utils.read(\n os.path.join(BENCHMARK_DIR, benchmark, 'benchmark.yaml'))\n print(benchmark + '-fuzz-target=' + benchmark_vars['fuzz_target'])\n print()\n\n\ndef _print_makefile_run_template(image):\n fuzzer, benchmark = image['tag'].split('/')[1:]\n\n for run_type in ('run', 'debug', 'test-run'):\n print(('{run_type}-{fuzzer}-{benchmark}: ' +\n '.{fuzzer}-{benchmark}-runner').format(run_type=run_type,\n benchmark=benchmark,\n fuzzer=fuzzer))\n\n print('\\\n\\tdocker run \\\\\\n\\\n\\t--cpus=1 \\\\\\n\\\n\\t--cap-add SYS_NICE \\\\\\n\\\n\\t--cap-add SYS_PTRACE \\\\\\n\\\n\\t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\\n\\\n\\t-e FORCE_LOCAL=1 \\\\\\n\\\n\\t-e TRIAL_ID=1 \\\\\\n\\\n\\t-e FUZZER={fuzzer} \\\\\\n\\\n\\t-e BENCHMARK={benchmark} \\\\\\n\\\n\\t-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\\\n'.format(fuzzer=fuzzer, benchmark=benchmark))\n\n if run_type == 'test-run':\n print('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\n if run_type == 'debug':\n print('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n else:\n print('\\t', end='')\n\n print(os.path.join(BASE_TAG, image['tag']))\n print()\n\n\n# TODO(tanq16): Add unit test.\ndef 
_print_rules_for_image(name, image):\n \"\"\"Print makefile section for given image to stdout.\"\"\"\n if not ('base' in name or 'dispatcher' in name):\n print('.', end='')\n print(name + ':', end='')\n if 'depends_on' in image:\n for dep in image['depends_on']:\n if 'base' in dep:\n print(' ' + dep, end='')\n else:\n print(' .' + dep, end='')\n print()\n print('\\tdocker build \\\\')\n print('\\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n print('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\')\n print('\\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n if 'build_arg' in image:\n for arg in image['build_arg']:\n print('\\t--build-arg ' + arg + ' \\\\')\n if 'dockerfile' in image:\n print('\\t--file ' + image['dockerfile'] + ' \\\\')\n print('\\t' + image['context'])\n print()\n\n # Print run, debug, test-run rules if image is a runner.\n if 'runner' in name and not ('intermediate' in name or 'base' in name):\n _print_makefile_run_template(image)\n\n\ndef main():\n \"\"\"Generates Makefile with docker image build rules.\"\"\"\n fuzzers = fuzzer_utils.get_fuzzer_names()\n benchmarks = benchmark_utils.get_all_benchmarks()\n buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)\n\n print('export DOCKER_BUILDKIT := 1')\n\n # Print oss-fuzz benchmarks property variables.\n _print_benchmark_fuzz_target(benchmarks)\n\n for name, image in buildable_images.items():\n _print_rules_for_image(name, image)\n\n # Print build targets for all fuzzer-benchmark pairs (including coverage).\n fuzzers.append('coverage')\n for fuzzer in fuzzers:\n image_type = \"runner\"\n if 'coverage' in fuzzer:\n image_type = \"builder\"\n for benchmark in benchmarks:\n print(('build-{fuzzer}-{benchmark}: ' +\n '.{fuzzer}-{benchmark}-{image_type}\\n').format(\n fuzzer=fuzzer,\n benchmark=benchmark,\n image_type=image_type))\n print()\n\n # Print fuzzer-all benchmarks build targets.\n for fuzzer in fuzzers:\n all_build_targets = ' '.join([\n 'build-{0}-{1}'.format(fuzzer, benchmark)\n for benchmark in benchmarks\n ])\n print('build-{fuzzer}-all: {all_targets}'.format(\n fuzzer=fuzzer, all_targets=all_build_targets))\n\n # Print all targets build target.\n all_build_targets = ' '.join(\n ['build-{0}-all'.format(name) for name in fuzzers])\n print('build-all: {all_targets}'.format(all_targets=all_build_targets))\n\n\nif __name__ == '__main__':\n main()\n", "path": "docker/generate_makefile.py"}]} | 2,375 | 127 |
gh_patches_debug_2197 | rasdani/github-patches | git_diff | microsoft__torchgeo-1755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SustainBenchCropYield download doesn't work
### Description
Downloading the SustainBenchCropYield dataset doesn't work as expected
### Steps to reproduce
```
ds = SustainBenchCropYield("data/", download=True)
```
This downloads a file called `soybeans` then fails unzipping `soybeans.zip`. Works if you rename to .zip and unzip manually.
### Version
0.6.0.dev0
</issue>
<code>
[start of torchgeo/datasets/sustainbench_crop_yield.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """SustainBench Crop Yield dataset."""
5
6 import os
7 from typing import Any, Callable, Optional
8
9 import matplotlib.pyplot as plt
10 import numpy as np
11 import torch
12 from matplotlib.figure import Figure
13 from torch import Tensor
14
15 from .geo import NonGeoDataset
16 from .utils import DatasetNotFoundError, download_url, extract_archive
17
18
19 class SustainBenchCropYield(NonGeoDataset):
20 """SustainBench Crop Yield Dataset.
21
22 This dataset contains MODIS band histograms and soybean yield
23 estimates for selected counties in the USA, Argentina and Brazil.
24 The dataset is part of the
25 `SustainBench <https://sustainlab-group.github.io/sustainbench/docs/datasets/sdg2/crop_yield.html>`_
26 datasets for tackling the UN Sustainable Development Goals (SDGs).
27
28 Dataset Format:
29
30 * .npz files of stacked samples
31
32 Dataset Features:
33
34 * input histogram of 7 surface reflectance and 2 surface temperature
35 bands from MODIS pixel values in 32 ranges across 32 timesteps
36 resulting in 32x32x9 input images
37 * regression target value of soybean yield in metric tonnes per
38 harvested hectare
39
40 If you use this dataset in your research, please cite:
41
42 * https://doi.org/10.1145/3209811.3212707
43 * https://doi.org/10.1609/aaai.v31i1.11172
44
45 .. versionadded:: 0.5
46 """ # noqa: E501
47
48 valid_countries = ["usa", "brazil", "argentina"]
49
50 md5 = "c2794e59512c897d9bea77b112848122"
51
52 url = "https://drive.google.com/file/d/1odwkI1hiE5rMZ4VfM0hOXzlFR4NbhrfU/view?usp=share_link" # noqa: E501
53
54 dir = "soybeans"
55
56 valid_splits = ["train", "dev", "test"]
57
58 def __init__(
59 self,
60 root: str = "data",
61 split: str = "train",
62 countries: list[str] = ["usa"],
63 transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,
64 download: bool = False,
65 checksum: bool = False,
66 ) -> None:
67 """Initialize a new Dataset instance.
68
69 Args:
70 root: root directory where dataset can be found
71 split: one of "train", "dev", or "test"
72 countries: which countries to include in the dataset
73 transforms: a function/transform that takes an input sample
74 and returns a transformed version
75 download: if True, download dataset and store it in the root directory
76 checksum: if True, check the MD5 after downloading files (may be slow)
77
78 Raises:
79 AssertionError: if ``countries`` contains invalid countries or if ``split``
80 is invalid
81 DatasetNotFoundError: If dataset is not found and *download* is False.
82 """
83 assert set(countries).issubset(
84 self.valid_countries
85 ), f"Please choose a subset of these valid countried: {self.valid_countries}."
86 self.countries = countries
87
88 assert (
89 split in self.valid_splits
90 ), f"Pleas choose one of these valid data splits {self.valid_splits}."
91 self.split = split
92
93 self.root = root
94 self.transforms = transforms
95 self.download = download
96 self.checksum = checksum
97
98 self._verify()
99 self.collection = self.retrieve_collection()
100
101 def __len__(self) -> int:
102 """Return the number of data points in the dataset.
103
104 Returns:
105 length of the dataset
106 """
107 return len(self.collection)
108
109 def __getitem__(self, index: int) -> dict[str, Tensor]:
110 """Return an index within the dataset.
111
112 Args:
113 index: index to return
114
115 Returns:
116 data and label at that index
117 """
118 input_file_path, sample_idx = self.collection[index]
119
120 sample: dict[str, Tensor] = {
121 "image": self._load_image(input_file_path, sample_idx)
122 }
123 sample.update(self._load_features(input_file_path, sample_idx))
124
125 if self.transforms is not None:
126 sample = self.transforms(sample)
127
128 return sample
129
130 def _load_image(self, path: str, sample_idx: int) -> Tensor:
131 """Load input image.
132
133 Args:
134 path: path to input npz collection
135 sample_idx: what sample to index from the npz collection
136
137 Returns:
138 input image as tensor
139 """
140 arr = np.load(path)["data"][sample_idx]
141 # return [channel, height, width]
142 return torch.from_numpy(arr).permute(2, 0, 1).to(torch.float32)
143
144 def _load_features(self, path: str, sample_idx: int) -> dict[str, Tensor]:
145 """Load features value.
146
147 Args:
148 path: path to image npz collection
149 sample_idx: what sample to index from the npz collection
150
151 Returns:
152 target regression value
153 """
154 target_file_path = path.replace("_hists", "_yields")
155 target = np.load(target_file_path)["data"][sample_idx]
156
157 years_file_path = path.replace("_hists", "_years")
158 year = int(np.load(years_file_path)["data"][sample_idx])
159
160 ndvi_file_path = path.replace("_hists", "_ndvi")
161 ndvi = np.load(ndvi_file_path)["data"][sample_idx]
162
163 features = {
164 "label": torch.tensor(target).to(torch.float32),
165 "year": torch.tensor(year),
166 "ndvi": torch.from_numpy(ndvi).to(dtype=torch.float32),
167 }
168 return features
169
170 def retrieve_collection(self) -> list[tuple[str, int]]:
171 """Retrieve the collection.
172
173 Returns:
174 path and index to dataset samples
175 """
176 collection = []
177 for country in self.countries:
178 file_path = os.path.join(
179 self.root, self.dir, country, f"{self.split}_hists.npz"
180 )
181 npz_file = np.load(file_path)
182 num_data_points = npz_file["data"].shape[0]
183 for idx in range(num_data_points):
184 collection.append((file_path, idx))
185
186 return collection
187
188 def _verify(self) -> None:
189 """Verify the integrity of the dataset."""
190 # Check if the extracted files already exist
191 pathname = os.path.join(self.root, self.dir)
192 if os.path.exists(pathname):
193 return
194
195 # Check if the zip files have already been downloaded
196 pathname = os.path.join(self.root, self.dir) + ".zip"
197 if os.path.exists(pathname):
198 self._extract()
199 return
200
201 # Check if the user requested to download the dataset
202 if not self.download:
203 raise DatasetNotFoundError(self)
204
205 # Download the dataset
206 self._download()
207 self._extract()
208
209 def _download(self) -> None:
210 """Download the dataset and extract it."""
211 download_url(
212 self.url,
213 self.root,
214 filename=self.dir,
215 md5=self.md5 if self.checksum else None,
216 )
217 self._extract()
218
219 def _extract(self) -> None:
220 """Extract the dataset."""
221 zipfile_path = os.path.join(self.root, self.dir) + ".zip"
222 extract_archive(zipfile_path, self.root)
223
224 def plot(
225 self,
226 sample: dict[str, Any],
227 band_idx: int = 0,
228 show_titles: bool = True,
229 suptitle: Optional[str] = None,
230 ) -> Figure:
231 """Plot a sample from the dataset.
232
233 Args:
234 sample: a sample return by :meth:`__getitem__`
235 band_idx: which of the nine histograms to index
236 show_titles: flag indicating whether to show titles above each panel
237 suptitle: optional suptitle to use for figure
238
239 Returns:
240 a matplotlib Figure with the rendered sample
241
242 """
243 image, label = sample["image"], sample["label"].item()
244
245 showing_predictions = "prediction" in sample
246 if showing_predictions:
247 prediction = sample["prediction"].item()
248
249 fig, ax = plt.subplots(1, 1, figsize=(10, 10))
250
251 ax.imshow(image.permute(1, 2, 0)[:, :, band_idx])
252 ax.axis("off")
253
254 if show_titles:
255 title = f"Label: {label:.3f}"
256 if showing_predictions:
257 title += f"\nPrediction: {prediction:.3f}"
258 ax.set_title(title)
259
260 if suptitle is not None:
261 plt.suptitle(suptitle)
262
263 return fig
264
[end of torchgeo/datasets/sustainbench_crop_yield.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchgeo/datasets/sustainbench_crop_yield.py b/torchgeo/datasets/sustainbench_crop_yield.py
--- a/torchgeo/datasets/sustainbench_crop_yield.py
+++ b/torchgeo/datasets/sustainbench_crop_yield.py
@@ -211,7 +211,7 @@
download_url(
self.url,
self.root,
- filename=self.dir,
+ filename=self.dir + ".zip",
md5=self.md5 if self.checksum else None,
)
self._extract()
| {"golden_diff": "diff --git a/torchgeo/datasets/sustainbench_crop_yield.py b/torchgeo/datasets/sustainbench_crop_yield.py\n--- a/torchgeo/datasets/sustainbench_crop_yield.py\n+++ b/torchgeo/datasets/sustainbench_crop_yield.py\n@@ -211,7 +211,7 @@\n download_url(\n self.url,\n self.root,\n- filename=self.dir,\n+ filename=self.dir + \".zip\",\n md5=self.md5 if self.checksum else None,\n )\n self._extract()\n", "issue": "SustainBenchCropYield download doesn't work\n### Description\n\nDownloading the SustainBenchCropYield dataset doesn't work as expected\n\n### Steps to reproduce\n\n```\r\nds = SustainBenchCropYield(\"data/\", download=True)\r\n```\r\n\r\nThis downloads a file called `soybeans` then fails unzipping `soybeans.zip`. Works if you rename to .zip and unzip manually.\n\n### Version\n\n0.6.0.dev0\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"SustainBench Crop Yield dataset.\"\"\"\n\nimport os\nfrom typing import Any, Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoDataset\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\n\n\nclass SustainBenchCropYield(NonGeoDataset):\n \"\"\"SustainBench Crop Yield Dataset.\n\n This dataset contains MODIS band histograms and soybean yield\n estimates for selected counties in the USA, Argentina and Brazil.\n The dataset is part of the\n `SustainBench <https://sustainlab-group.github.io/sustainbench/docs/datasets/sdg2/crop_yield.html>`_\n datasets for tackling the UN Sustainable Development Goals (SDGs).\n\n Dataset Format:\n\n * .npz files of stacked samples\n\n Dataset Features:\n\n * input histogram of 7 surface reflectance and 2 surface temperature\n bands from MODIS pixel values in 32 ranges across 32 timesteps\n resulting in 32x32x9 input images\n * regression target value of soybean yield in metric tonnes per\n harvested hectare\n\n If you use this dataset in your research, please cite:\n\n * https://doi.org/10.1145/3209811.3212707\n * https://doi.org/10.1609/aaai.v31i1.11172\n\n .. 
versionadded:: 0.5\n \"\"\" # noqa: E501\n\n valid_countries = [\"usa\", \"brazil\", \"argentina\"]\n\n md5 = \"c2794e59512c897d9bea77b112848122\"\n\n url = \"https://drive.google.com/file/d/1odwkI1hiE5rMZ4VfM0hOXzlFR4NbhrfU/view?usp=share_link\" # noqa: E501\n\n dir = \"soybeans\"\n\n valid_splits = [\"train\", \"dev\", \"test\"]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n countries: list[str] = [\"usa\"],\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\", \"dev\", or \"test\"\n countries: which countries to include in the dataset\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 after downloading files (may be slow)\n\n Raises:\n AssertionError: if ``countries`` contains invalid countries or if ``split``\n is invalid\n DatasetNotFoundError: If dataset is not found and *download* is False.\n \"\"\"\n assert set(countries).issubset(\n self.valid_countries\n ), f\"Please choose a subset of these valid countried: {self.valid_countries}.\"\n self.countries = countries\n\n assert (\n split in self.valid_splits\n ), f\"Pleas choose one of these valid data splits {self.valid_splits}.\"\n self.split = split\n\n self.root = root\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n self.collection = self.retrieve_collection()\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.collection)\n\n def __getitem__(self, index: int) -> dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n input_file_path, sample_idx = self.collection[index]\n\n sample: dict[str, Tensor] = {\n \"image\": self._load_image(input_file_path, sample_idx)\n }\n sample.update(self._load_features(input_file_path, sample_idx))\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def _load_image(self, path: str, sample_idx: int) -> Tensor:\n \"\"\"Load input image.\n\n Args:\n path: path to input npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n input image as tensor\n \"\"\"\n arr = np.load(path)[\"data\"][sample_idx]\n # return [channel, height, width]\n return torch.from_numpy(arr).permute(2, 0, 1).to(torch.float32)\n\n def _load_features(self, path: str, sample_idx: int) -> dict[str, Tensor]:\n \"\"\"Load features value.\n\n Args:\n path: path to image npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n target regression value\n \"\"\"\n target_file_path = path.replace(\"_hists\", \"_yields\")\n target = np.load(target_file_path)[\"data\"][sample_idx]\n\n years_file_path = path.replace(\"_hists\", \"_years\")\n year = int(np.load(years_file_path)[\"data\"][sample_idx])\n\n ndvi_file_path = path.replace(\"_hists\", \"_ndvi\")\n ndvi = np.load(ndvi_file_path)[\"data\"][sample_idx]\n\n features = {\n \"label\": torch.tensor(target).to(torch.float32),\n \"year\": torch.tensor(year),\n \"ndvi\": torch.from_numpy(ndvi).to(dtype=torch.float32),\n }\n return features\n\n def 
retrieve_collection(self) -> list[tuple[str, int]]:\n \"\"\"Retrieve the collection.\n\n Returns:\n path and index to dataset samples\n \"\"\"\n collection = []\n for country in self.countries:\n file_path = os.path.join(\n self.root, self.dir, country, f\"{self.split}_hists.npz\"\n )\n npz_file = np.load(file_path)\n num_data_points = npz_file[\"data\"].shape[0]\n for idx in range(num_data_points):\n collection.append((file_path, idx))\n\n return collection\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\"\"\"\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise DatasetNotFoundError(self)\n\n # Download the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n download_url(\n self.url,\n self.root,\n filename=self.dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n zipfile_path = os.path.join(self.root, self.dir) + \".zip\"\n extract_archive(zipfile_path, self.root)\n\n def plot(\n self,\n sample: dict[str, Any],\n band_idx: int = 0,\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample return by :meth:`__getitem__`\n band_idx: which of the nine histograms to index\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional suptitle to use for figure\n\n Returns:\n a matplotlib Figure with the rendered sample\n\n \"\"\"\n image, label = sample[\"image\"], sample[\"label\"].item()\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n prediction = sample[\"prediction\"].item()\n\n fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n\n ax.imshow(image.permute(1, 2, 0)[:, :, band_idx])\n ax.axis(\"off\")\n\n if show_titles:\n title = f\"Label: {label:.3f}\"\n if showing_predictions:\n title += f\"\\nPrediction: {prediction:.3f}\"\n ax.set_title(title)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": "torchgeo/datasets/sustainbench_crop_yield.py"}]} | 3,319 | 117 |
gh_patches_debug_27882 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Where is `fast_pred_var` moving?
I received the following warning when using `fast_pred_var`:
```
/cluster/nhunt/anaconda/envs/bayes_opt/lib/python3.7/site-packages/gpytorch/beta_features.py:17:
DeprecationWarning: `gpytorch.settings.fast_pred_var` has moved to `gpytorch.settings.fast_pred_var`.
```
It seems that I'm being warned that `fast_pred_var` has moved to its current location. Was there a typo in the warning about how we should be using this setting now?
```bash
$ pip list | grep gpytorch
gpytorch 0.2.1
```
</issue>
<code>
[start of gpytorch/__init__.py]
1 #!/usr/bin/env python3
2 from .module import Module
3 from . import (
4 beta_features,
5 distributions,
6 kernels,
7 lazy,
8 likelihoods,
9 means,
10 mlls,
11 models,
12 priors,
13 settings,
14 utils,
15 variational,
16 )
17 from .functions import (
18 add_diag,
19 add_jitter,
20 dsmm,
21 inv_matmul,
22 inv_quad,
23 inv_quad_logdet,
24 logdet,
25 log_normal_cdf,
26 matmul,
27 normal_cdf,
28 root_decomposition,
29 root_inv_decomposition,
30 # Deprecated
31 inv_quad_log_det,
32 log_det,
33 )
34 from .mlls import ExactMarginalLogLikelihood, VariationalMarginalLogLikelihood
35 from .lazy import lazify, delazify
36
37
38 __version__ = "0.2.1"
39
40 # Old deprecated stuff
41 fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, "gpytorch.settings.fast_pred_var")
42
43 __all__ = [
44 # Submodules
45 "distributions",
46 "kernels",
47 "lazy",
48 "likelihoods",
49 "means",
50 "mlls",
51 "models",
52 "priors",
53 "utils",
54 "variational",
55 # Classes
56 "Module",
57 "ExactMarginalLogLikelihood",
58 "VariationalMarginalLogLikelihood",
59 # Functions
60 "add_diag",
61 "add_jitter",
62 "delazify",
63 "dsmm",
64 "inv_matmul",
65 "inv_quad",
66 "inv_quad_logdet",
67 "lazify",
68 "logdet",
69 "log_normal_cdf",
70 "matmul",
71 "normal_cdf",
72 "root_decomposition",
73 "root_inv_decomposition",
74 # Context managers
75 "beta_features",
76 "settings",
77 # Other
78 "__version__",
79 # Deprecated
80 "fast_pred_var",
81 "inv_quad_log_det",
82 "log_det",
83 ]
84
[end of gpytorch/__init__.py]
[start of gpytorch/beta_features.py]
1 #!/usr/bin/env python3
2
3 import warnings
4 from .settings import _feature_flag, _value_context
5 from .settings import fast_pred_var as _fast_pred_var
6 from .settings import fast_pred_samples as _fast_pred_samples
7
8
9 class _moved_beta_feature(object):
10 def __init__(self, new_cls, orig_name=None):
11 self.new_cls = new_cls
12 self.orig_name = orig_name if orig_name is not None else "gpytorch.settings.{}".format(new_cls.__name__)
13
14 def __call__(self, *args, **kwargs):
15 warnings.warn(
16 "`{}` has moved to `gpytorch.settings.{}`.".format(self.orig_name, self.new_cls.__name__),
17 DeprecationWarning
18 )
19 return self.new_cls(*args, **kwargs)
20
21 def __getattr__(self, name):
22 return getattr(self.new_cls, name)
23
24
25 fast_pred_var = _moved_beta_feature(_fast_pred_var)
26 fast_pred_samples = _moved_beta_feature(_fast_pred_samples)
27
28
29 class checkpoint_kernel(_value_context):
30 """
31 Should the kernel be computed in chunks with checkpointing or not? (Default, no)
32
33 If `split_size = 0`:
34 The kernel is computed explicitly. During training, the kernel matrix is
35 kept in memory for the backward pass. This is the fastest option but the
36 most memory intensive.
37 If `split_size > 0`:
38 The kernel is never fully computed or stored. Instead, the kernel is only
39 accessed through matrix multiplication. The matrix multiplication is
40 computed in `segments` chunks. This is slower, but requires significantly less memory.
41
42 Default: 0
43 """
44
45 _global_value = 0
46
47
48 class diagonal_correction(_feature_flag):
49 """
50 Add a diagonal correction to scalable inducing point methods
51 """
52
53 _state = True
54
55
56 class default_preconditioner(_feature_flag):
57 """
58 Add a diagonal correction to scalable inducing point methods
59 """
60
61 pass
62
63
64 __all__ = ["fast_pred_var", "fast_pred_samples", "diagonal_correction", "default_preconditioner"]
65
[end of gpytorch/beta_features.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpytorch/__init__.py b/gpytorch/__init__.py
--- a/gpytorch/__init__.py
+++ b/gpytorch/__init__.py
@@ -37,9 +37,6 @@
__version__ = "0.2.1"
-# Old deprecated stuff
-fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, "gpytorch.settings.fast_pred_var")
-
__all__ = [
# Submodules
"distributions",
@@ -77,7 +74,6 @@
# Other
"__version__",
# Deprecated
- "fast_pred_var",
"inv_quad_log_det",
"log_det",
]
diff --git a/gpytorch/beta_features.py b/gpytorch/beta_features.py
--- a/gpytorch/beta_features.py
+++ b/gpytorch/beta_features.py
@@ -2,8 +2,6 @@
import warnings
from .settings import _feature_flag, _value_context
-from .settings import fast_pred_var as _fast_pred_var
-from .settings import fast_pred_samples as _fast_pred_samples
class _moved_beta_feature(object):
@@ -22,10 +20,6 @@
return getattr(self.new_cls, name)
-fast_pred_var = _moved_beta_feature(_fast_pred_var)
-fast_pred_samples = _moved_beta_feature(_fast_pred_samples)
-
-
class checkpoint_kernel(_value_context):
"""
Should the kernel be computed in chunks with checkpointing or not? (Default, no)
@@ -61,4 +55,4 @@
pass
-__all__ = ["fast_pred_var", "fast_pred_samples", "diagonal_correction", "default_preconditioner"]
+__all__ = ["checkpoint_kernel", "diagonal_correction", "default_preconditioner"]
| {"golden_diff": "diff --git a/gpytorch/__init__.py b/gpytorch/__init__.py\n--- a/gpytorch/__init__.py\n+++ b/gpytorch/__init__.py\n@@ -37,9 +37,6 @@\n \n __version__ = \"0.2.1\"\n \n-# Old deprecated stuff\n-fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, \"gpytorch.settings.fast_pred_var\")\n-\n __all__ = [\n # Submodules\n \"distributions\",\n@@ -77,7 +74,6 @@\n # Other\n \"__version__\",\n # Deprecated\n- \"fast_pred_var\",\n \"inv_quad_log_det\",\n \"log_det\",\n ]\ndiff --git a/gpytorch/beta_features.py b/gpytorch/beta_features.py\n--- a/gpytorch/beta_features.py\n+++ b/gpytorch/beta_features.py\n@@ -2,8 +2,6 @@\n \n import warnings\n from .settings import _feature_flag, _value_context\n-from .settings import fast_pred_var as _fast_pred_var\n-from .settings import fast_pred_samples as _fast_pred_samples\n \n \n class _moved_beta_feature(object):\n@@ -22,10 +20,6 @@\n return getattr(self.new_cls, name)\n \n \n-fast_pred_var = _moved_beta_feature(_fast_pred_var)\n-fast_pred_samples = _moved_beta_feature(_fast_pred_samples)\n-\n-\n class checkpoint_kernel(_value_context):\n \"\"\"\n Should the kernel be computed in chunks with checkpointing or not? (Default, no)\n@@ -61,4 +55,4 @@\n pass\n \n \n-__all__ = [\"fast_pred_var\", \"fast_pred_samples\", \"diagonal_correction\", \"default_preconditioner\"]\n+__all__ = [\"checkpoint_kernel\", \"diagonal_correction\", \"default_preconditioner\"]\n", "issue": "Where is `fast_pred_var` moving?\nI received the following warning when using `fast_pred_var`:\r\n\r\n```\r\n/cluster/nhunt/anaconda/envs/bayes_opt/lib/python3.7/site-packages/gpytorch/beta_features.py:17:\r\nDeprecationWarning: `gpytorch.settings.fast_pred_var` has moved to `gpytorch.settings.fast_pred_var`.\r\n```\r\n\r\nIt seems that I'm being warned that `fast_pred_var` has moved to its current location. Was there a typo in the warning about how we should be using this setting now?\r\n\r\n```bash\r\n$ pip list | grep gpytorch\r\ngpytorch 0.2.1\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nfrom .module import Module\nfrom . 
import (\n beta_features,\n distributions,\n kernels,\n lazy,\n likelihoods,\n means,\n mlls,\n models,\n priors,\n settings,\n utils,\n variational,\n)\nfrom .functions import (\n add_diag,\n add_jitter,\n dsmm,\n inv_matmul,\n inv_quad,\n inv_quad_logdet,\n logdet,\n log_normal_cdf,\n matmul,\n normal_cdf,\n root_decomposition,\n root_inv_decomposition,\n # Deprecated\n inv_quad_log_det,\n log_det,\n)\nfrom .mlls import ExactMarginalLogLikelihood, VariationalMarginalLogLikelihood\nfrom .lazy import lazify, delazify\n\n\n__version__ = \"0.2.1\"\n\n# Old deprecated stuff\nfast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, \"gpytorch.settings.fast_pred_var\")\n\n__all__ = [\n # Submodules\n \"distributions\",\n \"kernels\",\n \"lazy\",\n \"likelihoods\",\n \"means\",\n \"mlls\",\n \"models\",\n \"priors\",\n \"utils\",\n \"variational\",\n # Classes\n \"Module\",\n \"ExactMarginalLogLikelihood\",\n \"VariationalMarginalLogLikelihood\",\n # Functions\n \"add_diag\",\n \"add_jitter\",\n \"delazify\",\n \"dsmm\",\n \"inv_matmul\",\n \"inv_quad\",\n \"inv_quad_logdet\",\n \"lazify\",\n \"logdet\",\n \"log_normal_cdf\",\n \"matmul\",\n \"normal_cdf\",\n \"root_decomposition\",\n \"root_inv_decomposition\",\n # Context managers\n \"beta_features\",\n \"settings\",\n # Other\n \"__version__\",\n # Deprecated\n \"fast_pred_var\",\n \"inv_quad_log_det\",\n \"log_det\",\n]\n", "path": "gpytorch/__init__.py"}, {"content": "#!/usr/bin/env python3\n\nimport warnings\nfrom .settings import _feature_flag, _value_context\nfrom .settings import fast_pred_var as _fast_pred_var\nfrom .settings import fast_pred_samples as _fast_pred_samples\n\n\nclass _moved_beta_feature(object):\n def __init__(self, new_cls, orig_name=None):\n self.new_cls = new_cls\n self.orig_name = orig_name if orig_name is not None else \"gpytorch.settings.{}\".format(new_cls.__name__)\n\n def __call__(self, *args, **kwargs):\n warnings.warn(\n \"`{}` has moved to `gpytorch.settings.{}`.\".format(self.orig_name, self.new_cls.__name__),\n DeprecationWarning\n )\n return self.new_cls(*args, **kwargs)\n\n def __getattr__(self, name):\n return getattr(self.new_cls, name)\n\n\nfast_pred_var = _moved_beta_feature(_fast_pred_var)\nfast_pred_samples = _moved_beta_feature(_fast_pred_samples)\n\n\nclass checkpoint_kernel(_value_context):\n \"\"\"\n Should the kernel be computed in chunks with checkpointing or not? (Default, no)\n\n If `split_size = 0`:\n The kernel is computed explicitly. During training, the kernel matrix is\n kept in memory for the backward pass. This is the fastest option but the\n most memory intensive.\n If `split_size > 0`:\n The kernel is never fully computed or stored. Instead, the kernel is only\n accessed through matrix multiplication. The matrix multiplication is\n computed in `segments` chunks. This is slower, but requires significantly less memory.\n\n Default: 0\n \"\"\"\n\n _global_value = 0\n\n\nclass diagonal_correction(_feature_flag):\n \"\"\"\n Add a diagonal correction to scalable inducing point methods\n \"\"\"\n\n _state = True\n\n\nclass default_preconditioner(_feature_flag):\n \"\"\"\n Add a diagonal correction to scalable inducing point methods\n \"\"\"\n\n pass\n\n\n__all__ = [\"fast_pred_var\", \"fast_pred_samples\", \"diagonal_correction\", \"default_preconditioner\"]\n", "path": "gpytorch/beta_features.py"}]} | 1,879 | 397 |
gh_patches_debug_2264 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1410 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to post review in Safari on iPadOS
**Describe the bug**
When trying to post a long-ish review in Safari on iPadOS (desktop mode, content blockers disabled), the post button shows the spinner introduced with #1388, but the posting never concludes.
**To Reproduce**
Steps to reproduce the behavior:
1. Get an iPad pro 13.9” running iPadOS 14.7.1
2. Open your bookwyrm.social account in Safari, ensuring it runs in desktop mode and content blockers are disabled
3. Write a review of at least 2700 chars
4. Try to post it
**Expected behavior**
After some spinning, the review appears on your feed.
**Instance**
bookwyrm.social
**Extra context**
[Book I’m trying to review](https://bookwyrm.social/book/214201).
---
**Device Info:**
- Device: iPad pro 2nd gen 13.9”
- OS: iPadOS 14.7.1
- Browser: Safari
- Version N/A
</issue>
<code>
[start of bookwyrm/settings.py]
1 """ bookwyrm settings and configuration """
2 import os
3 from environs import Env
4
5 import requests
6 from django.utils.translation import gettext_lazy as _
7
8
9 env = Env()
10 DOMAIN = env("DOMAIN")
11 VERSION = "0.0.1"
12
13 PAGE_LENGTH = env("PAGE_LENGTH", 15)
14 DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
15
16 JS_CACHE = "19447742"
17
18 # email
19 EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
20 EMAIL_HOST = env("EMAIL_HOST")
21 EMAIL_PORT = env("EMAIL_PORT", 587)
22 EMAIL_HOST_USER = env("EMAIL_HOST_USER")
23 EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
24 EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS", True)
25 EMAIL_USE_SSL = env.bool("EMAIL_USE_SSL", False)
26 DEFAULT_FROM_EMAIL = "admin@{:s}".format(env("DOMAIN"))
27
28 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
29 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
30 LOCALE_PATHS = [
31 os.path.join(BASE_DIR, "locale"),
32 ]
33
34 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
35
36 # Preview image
37 ENABLE_PREVIEW_IMAGES = env.bool("ENABLE_PREVIEW_IMAGES", False)
38 PREVIEW_BG_COLOR = env.str("PREVIEW_BG_COLOR", "use_dominant_color_light")
39 PREVIEW_TEXT_COLOR = env.str("PREVIEW_TEXT_COLOR", "#363636")
40 PREVIEW_IMG_WIDTH = env.int("PREVIEW_IMG_WIDTH", 1200)
41 PREVIEW_IMG_HEIGHT = env.int("PREVIEW_IMG_HEIGHT", 630)
42 PREVIEW_DEFAULT_COVER_COLOR = env.str("PREVIEW_DEFAULT_COVER_COLOR", "#002549")
43
44 # Quick-start development settings - unsuitable for production
45 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
46
47 # SECURITY WARNING: keep the secret key used in production secret!
48 SECRET_KEY = env("SECRET_KEY")
49
50 # SECURITY WARNING: don't run with debug turned on in production!
51 DEBUG = env.bool("DEBUG", True)
52 USE_HTTPS = env.bool("USE_HTTPS", False)
53
54 ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", ["*"])
55
56 # Application definition
57
58 INSTALLED_APPS = [
59 "django.contrib.admin",
60 "django.contrib.auth",
61 "django.contrib.contenttypes",
62 "django.contrib.sessions",
63 "django.contrib.messages",
64 "django.contrib.staticfiles",
65 "django.contrib.humanize",
66 "django_rename_app",
67 "bookwyrm",
68 "celery",
69 "imagekit",
70 "storages",
71 ]
72
73 MIDDLEWARE = [
74 "django.middleware.security.SecurityMiddleware",
75 "django.contrib.sessions.middleware.SessionMiddleware",
76 "django.middleware.locale.LocaleMiddleware",
77 "django.middleware.common.CommonMiddleware",
78 "django.middleware.csrf.CsrfViewMiddleware",
79 "django.contrib.auth.middleware.AuthenticationMiddleware",
80 "bookwyrm.timezone_middleware.TimezoneMiddleware",
81 "django.contrib.messages.middleware.MessageMiddleware",
82 "django.middleware.clickjacking.XFrameOptionsMiddleware",
83 ]
84
85 ROOT_URLCONF = "bookwyrm.urls"
86
87 TEMPLATES = [
88 {
89 "BACKEND": "django.template.backends.django.DjangoTemplates",
90 "DIRS": ["templates"],
91 "APP_DIRS": True,
92 "OPTIONS": {
93 "context_processors": [
94 "django.template.context_processors.debug",
95 "django.template.context_processors.request",
96 "django.contrib.auth.context_processors.auth",
97 "django.contrib.messages.context_processors.messages",
98 "bookwyrm.context_processors.site_settings",
99 ],
100 },
101 },
102 ]
103
104
105 WSGI_APPLICATION = "bookwyrm.wsgi.application"
106
107 # redis/activity streams settings
108 REDIS_ACTIVITY_HOST = env("REDIS_ACTIVITY_HOST", "localhost")
109 REDIS_ACTIVITY_PORT = env("REDIS_ACTIVITY_PORT", 6379)
110 REDIS_ACTIVITY_PASSWORD = env("REDIS_ACTIVITY_PASSWORD", None)
111
112 MAX_STREAM_LENGTH = int(env("MAX_STREAM_LENGTH", 200))
113
114 STREAMS = [
115 {"key": "home", "name": _("Home Timeline"), "shortname": _("Home")},
116 {"key": "books", "name": _("Books Timeline"), "shortname": _("Books")},
117 ]
118
119 # Database
120 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
121
122 DATABASES = {
123 "default": {
124 "ENGINE": "django.db.backends.postgresql_psycopg2",
125 "NAME": env("POSTGRES_DB", "fedireads"),
126 "USER": env("POSTGRES_USER", "fedireads"),
127 "PASSWORD": env("POSTGRES_PASSWORD", "fedireads"),
128 "HOST": env("POSTGRES_HOST", ""),
129 "PORT": env("POSTGRES_PORT", 5432),
130 },
131 }
132
133
134 LOGIN_URL = "/login/"
135 AUTH_USER_MODEL = "bookwyrm.User"
136
137 # Password validation
138 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
139
140 # pylint: disable=line-too-long
141 AUTH_PASSWORD_VALIDATORS = [
142 {
143 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
144 },
145 {
146 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
147 },
148 {
149 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
150 },
151 {
152 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
153 },
154 ]
155
156
157 # Internationalization
158 # https://docs.djangoproject.com/en/3.2/topics/i18n/
159
160 LANGUAGE_CODE = "en-us"
161 LANGUAGES = [
162 ("en-us", _("English")),
163 ("de-de", _("German")),
164 ("es", _("Spanish")),
165 ("fr-fr", _("French")),
166 ("zh-hans", _("Simplified Chinese")),
167 ("zh-hant", _("Traditional Chinese")),
168 ]
169
170
171 TIME_ZONE = "UTC"
172
173 USE_I18N = True
174
175 USE_L10N = True
176
177 USE_TZ = True
178
179
180 USER_AGENT = "%s (BookWyrm/%s; +https://%s/)" % (
181 requests.utils.default_user_agent(),
182 VERSION,
183 DOMAIN,
184 )
185
186 # Imagekit generated thumbnails
187 ENABLE_THUMBNAIL_GENERATION = env.bool("ENABLE_THUMBNAIL_GENERATION", False)
188 IMAGEKIT_CACHEFILE_DIR = "thumbnails"
189
190 # Static files (CSS, JavaScript, Images)
191 # https://docs.djangoproject.com/en/3.2/howto/static-files/
192
193 PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
194
195 # Storage
196
197 PROTOCOL = "http"
198 if USE_HTTPS:
199 PROTOCOL = "https"
200
201 USE_S3 = env.bool("USE_S3", False)
202
203 if USE_S3:
204 # AWS settings
205 AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID")
206 AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY")
207 AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME")
208 AWS_S3_CUSTOM_DOMAIN = env("AWS_S3_CUSTOM_DOMAIN")
209 AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME", "")
210 AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
211 AWS_DEFAULT_ACL = "public-read"
212 AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
213 # S3 Static settings
214 STATIC_LOCATION = "static"
215 STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)
216 STATICFILES_STORAGE = "bookwyrm.storage_backends.StaticStorage"
217 # S3 Media settings
218 MEDIA_LOCATION = "images"
219 MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)
220 MEDIA_FULL_URL = MEDIA_URL
221 DEFAULT_FILE_STORAGE = "bookwyrm.storage_backends.ImagesStorage"
222 # I don't know if it's used, but the site crashes without it
223 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
224 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
225 else:
226 STATIC_URL = "/static/"
227 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
228 MEDIA_URL = "/images/"
229 MEDIA_FULL_URL = "%s://%s%s" % (PROTOCOL, DOMAIN, MEDIA_URL)
230 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
231
[end of bookwyrm/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py
--- a/bookwyrm/settings.py
+++ b/bookwyrm/settings.py
@@ -13,7 +13,7 @@
PAGE_LENGTH = env("PAGE_LENGTH", 15)
DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
-JS_CACHE = "19447742"
+JS_CACHE = "e5832a26"
# email
EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
| {"golden_diff": "diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py\n--- a/bookwyrm/settings.py\n+++ b/bookwyrm/settings.py\n@@ -13,7 +13,7 @@\n PAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\n DEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n \n-JS_CACHE = \"19447742\"\n+JS_CACHE = \"e5832a26\"\n \n # email\n EMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\n", "issue": "Unable to post review in Safari on iPadOS\n**Describe the bug**\nWhen trying to post a long-ish review in Safari on iPadOS (desktop mode, content blockers disabled), the post button shows the spinner introduced with #1388, but the posting never concludes.\n\n**To Reproduce**\nSteps to reproduce the behavior:\n1. Get an iPad pro 13.9\u201d running iPadOS 14.7.1\n2. Open your bookwyrm.social account in Safari, ensuring it runs in desktop mode and content blockers are disabled\n3. Write a review of at least 2700 chars\n4. Try to post it\n\n**Expected behavior**\nAfter some spinning, the review appears on your feed.\n\n**Instance**\nbookwyrm.social\n\n**Extra context**\n[Book I\u2019m trying to review](https://bookwyrm.social/book/214201).\n\n---\n\n**Device Info:**\n - Device: iPad pro 2nd gen 13.9\u201d\n - OS: iPadOS 14.7.1\n - Browser: Safari\n - Version N/A\n", "before_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"19447742\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = \"admin@{:s}\".format(env(\"DOMAIN\"))\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n 
\"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.timezone_middleware.TimezoneMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"POSTGRES_PORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nUSER_AGENT = \"%s (BookWyrm/%s; +https://%s/)\" % (\n requests.utils.default_user_agent(),\n VERSION,\n DOMAIN,\n)\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n 
PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = \"%s://%s%s\" % (PROTOCOL, DOMAIN, MEDIA_URL)\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}]} | 3,145 | 117 |
gh_patches_debug_4276 | rasdani/github-patches | git_diff | awslabs__gluonts-68 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tutorial notebook Exception
I downloaded the notebook available on https://gluon-ts.mxnet.io/examples/forecasting/tutorial.html, ran it on my local laptop (Darwin Kernel Version 16.7.0, Anaconda3 Distro, Python 3.7.3, Jupyter 4.4.0, gluonts 0.1.1), and got the following exception at:
```
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.trainer import Trainer
```
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/python/anaconda3/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed)
261 try:
--> 262 if issubclass(type_, val_type):
263 return validators
TypeError: issubclass() arg 1 must be a class
The above exception was the direct cause of the following exception:
RuntimeError Traceback (most recent call last)
<ipython-input-13-1fca1cb620ad> in <module>
----> 1 from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
2 from gluonts.trainer import Trainer
~/python/anaconda3/lib/python3.7/site-packages/gluonts/model/simple_feedforward/__init__.py in <module>
1 # Relative imports
----> 2 from ._estimator import SimpleFeedForwardEstimator
3
4 __all__ = ['SimpleFeedForwardEstimator']
~/python/anaconda3/lib/python3.7/site-packages/gluonts/model/simple_feedforward/_estimator.py in <module>
7 # First-party imports
8 from gluonts.core.component import validated
----> 9 from gluonts.distribution import DistributionOutput, StudentTOutput
10 from gluonts.model.estimator import GluonEstimator
11 from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/__init__.py in <module>
14 # Relative imports
15 from . import bijection
---> 16 from .binned import Binned, BinnedOutput
17 from .distribution import Distribution
18 from .distribution_output import DistributionOutput
~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/binned.py in <module>
166
167
--> 168 class BinnedOutput(DistributionOutput):
169 distr_cls: type = Binned
170
~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/binned.py in BinnedOutput()
170
171 @validated()
--> 172 def __init__(self, bin_centers: List) -> None:
173 # cannot pass directly nd.array because it is not serializable
174 bc = mx.nd.array(bin_centers)
~/python/anaconda3/lib/python3.7/site-packages/gluonts/core/component.py in validator(ctor)
167 f'{ctor_clsnme}Model',
168 __config__=ConfigBase.Config,
--> 169 **ctor_fields,
170 )
171 else:
~/python/anaconda3/lib/python3.7/site-packages/pydantic/main.py in create_model(model_name, __config__, __base__, **field_definitions)
408 annotation=f_annotation,
409 class_validators=vg.get_validators(f_name),
--> 410 config=config,
411 )
412
~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in infer(cls, name, value, annotation, class_validators, config)
105 required=required,
106 model_config=config,
--> 107 schema=schema,
108 )
109
~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in __init__(self, name, type_, class_validators, default, required, model_config, alias, allow_none, schema)
85 self.shape: Shape = Shape.SINGLETON
86 self._schema: Schema = schema
---> 87 self.prepare()
88
89 @classmethod
~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in prepare(self)
134
135 self._populate_sub_fields()
--> 136 self._populate_validators()
137
138 def schema(self, by_alias=True):
~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in _populate_validators(self)
264 *tuple(v.func for v in self.class_validators if not v.whole and v.pre),
265 *(get_validators() if get_validators else find_validators(self.type_,
--> 266 self.model_config.arbitrary_types_allowed)),
267 *tuple(v.func for v in self.class_validators if not v.whole and not v.pre),
268 )
~/python/anaconda3/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed)
263 return validators
264 except TypeError as e:
--> 265 raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e
266
267 if arbitrary_types_allowed:
RuntimeError: error checking inheritance of ~T (type: T)
```
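
As a rough sketch of what the traceback points at: `@validated()` builds a pydantic model from the constructor's type annotations at class-definition time, and an unparameterized `List` leaves pydantic trying to find validators for the free type variable `~T`. The class below is hypothetical and the exact behaviour depends on the installed pydantic version:

```python
from typing import List

from gluonts.core.component import validated


class BareListExample:
    # Hypothetical class: decorating a constructor whose annotation is a bare `List`
    # makes pydantic's model construction fail at class-definition time, as in the traceback.
    @validated()
    def __init__(self, values: List) -> None:
        self.values = values
```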
</issue>
<code>
[start of src/gluonts/distribution/binned.py]
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 # Standard library imports
15 from typing import Tuple, List
16
17 # Third-party imports
18 import mxnet as mx
19 from mxnet import gluon
20
21 # First-party imports
22 from gluonts.core.component import validated
23 from gluonts.model.common import Tensor
24
25 # Relative imports
26 from .distribution import Distribution, _sample_multiple, getF
27 from .distribution_output import DistributionOutput
28
29
30 class Binned(Distribution):
31 r"""
32 A binned distribution defined by a set of bins via
33 bin centers and bin probabilities.
34
35 Parameters
36 ----------
37 bin_probs
38 Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.
39 bin_centers
40 Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.
41 F
42 """
43
44 is_reparameterizable = False
45
46 def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:
47 self.bin_centers = bin_centers
48 self.bin_probs = bin_probs
49 self.F = F if F else getF(bin_probs)
50
51 self.bin_edges = Binned._compute_edges(self.F, bin_centers)
52
53 @staticmethod
54 def _compute_edges(F, bin_centers: Tensor) -> Tensor:
55 r"""
56         Computes the edges of the bins based on the centers. The first and last edges are set to :math:`-10^{10}` and
57         :math:`10^{10}`, respectively.
58
59 Parameters
60 ----------
61 F
62 bin_centers
63 Tensor of shape `(*batch_shape, num_bins)`.
64
65 Returns
66 -------
67 Tensor
68             Tensor of shape `(*batch_shape, num_bins+1)`
69 """
70
71 low = (
72 F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))
73 - 1.0e10
74 )
75 high = (
76 F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))
77 + 1.0e10
78 )
79
80 means = (
81 bin_centers.slice_axis(axis=-1, begin=1, end=None)
82 + bin_centers.slice_axis(axis=-1, begin=0, end=-1)
83 ) / 2.0
84
85 return F.concat(low, means, high, dim=-1)
86
87 @property
88 def batch_shape(self) -> Tuple:
89 return self.bin_centers.shape[:-1]
90
91 @property
92 def event_shape(self) -> Tuple:
93 return ()
94
95 @property
96 def event_dim(self) -> int:
97 return 0
98
99 @property
100 def mean(self):
101 return (self.bin_probs * self.bin_centers).sum(axis=-1)
102
103 @property
104 def stddev(self):
105 Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)
106 return (Ex2 - self.mean.square()).sqrt()
107
108 def log_prob(self, x):
109 x = x.expand_dims(axis=-1)
110 # TODO: when mxnet has searchsorted replace this
111 left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)
112 right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)
113 mask = self.F.broadcast_lesser_equal(
114 left_edges, x
115 ) * self.F.broadcast_lesser(x, right_edges)
116 return (self.bin_probs.log() * mask).sum(axis=-1)
117
118 def sample(self, num_samples=None):
119 def s(bin_probs):
120 F = self.F
121 indices = F.sample_multinomial(bin_probs)
122 if num_samples is None:
123 return self.bin_centers.pick(indices, -1).reshape_like(
124 F.zeros_like(indices.astype('float32'))
125 )
126 else:
127 return F.repeat(
128 F.expand_dims(self.bin_centers, axis=0),
129 repeats=num_samples,
130 axis=0,
131 ).pick(indices, -1)
132
133 return _sample_multiple(s, self.bin_probs, num_samples=num_samples)
134
135
136 class BinnedArgs(gluon.HybridBlock):
137 def __init__(self, bin_centers: mx.nd.NDArray, **kwargs) -> None:
138 super().__init__(**kwargs)
139 with self.name_scope():
140 self.bin_centers = self.params.get_constant(
141 'bincenters', bin_centers
142 )
143 self.num_bins = bin_centers.shape[0]
144
145 # needs to be named self.proj for consistency with the ArgProj class and the inference tests
146 self.proj = gluon.nn.HybridSequential()
147 self.proj.add(
148 gluon.nn.Dense(
149 self.num_bins,
150 prefix='binproj',
151 flatten=False,
152 weight_initializer=mx.init.Xavier(),
153 )
154 )
155 self.proj.add(gluon.nn.HybridLambda('softmax'))
156
157 def hybrid_forward(
158 self, F, x: Tensor, bin_centers: Tensor, **kwargs
159 ) -> Tuple[Tensor, Tensor]:
160 ps = self.proj(x)
161 return (
162 ps.reshape(shape=(-2, -1, self.num_bins), reverse=1),
163 # For some reason hybridize does not work when returning constants directly
164 bin_centers + 0.0,
165 )
166
167
168 class BinnedOutput(DistributionOutput):
169 distr_cls: type = Binned
170
171 @validated()
172 def __init__(self, bin_centers: List) -> None:
173 # cannot pass directly nd.array because it is not serializable
174 bc = mx.nd.array(bin_centers)
175 assert len(bc.shape) == 1
176 self.bin_centers = bc
177
178 def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:
179 return BinnedArgs(self.bin_centers)
180
181 def distribution(self, args, scale=None) -> Binned:
182 probs, centers = args
183 F = getF(probs)
184
185 if scale is not None:
186 centers = F.broadcast_mul(centers, scale).expand_dims(axis=-2)
187 else:
188 centers = F.broadcast_mul(
189 centers, F.ones_like(probs.slice_axis(axis=-2, begin=0, end=1))
190 )
191 return Binned(probs, centers)
192
193 @property
194 def event_shape(self) -> Tuple:
195 return ()
196
[end of src/gluonts/distribution/binned.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/gluonts/distribution/binned.py b/src/gluonts/distribution/binned.py
--- a/src/gluonts/distribution/binned.py
+++ b/src/gluonts/distribution/binned.py
@@ -169,7 +169,7 @@
distr_cls: type = Binned
@validated()
- def __init__(self, bin_centers: List) -> None:
+ def __init__(self, bin_centers: List[float]) -> None:
# cannot pass directly nd.array because it is not serializable
bc = mx.nd.array(bin_centers)
assert len(bc.shape) == 1
| {"golden_diff": "diff --git a/src/gluonts/distribution/binned.py b/src/gluonts/distribution/binned.py\n--- a/src/gluonts/distribution/binned.py\n+++ b/src/gluonts/distribution/binned.py\n@@ -169,7 +169,7 @@\n distr_cls: type = Binned\n \n @validated()\n- def __init__(self, bin_centers: List) -> None:\n+ def __init__(self, bin_centers: List[float]) -> None:\n # cannot pass directly nd.array because it is not serializable\n bc = mx.nd.array(bin_centers)\n assert len(bc.shape) == 1\n", "issue": "Tutorial notebook Exception\nI downloaded the notebook available on https://gluon-ts.mxnet.io/examples/forecasting/tutorial.html, and run it on my local laptop (Darwin Kernel Version 16.7.0, Anaconda3 Distro, Python 3.7.3, Jupyter 4.4.0, gluonts 0.1.1) and get the following exception at:\r\n\r\n```\r\nfrom gluonts.model.simple_feedforward import SimpleFeedForwardEstimator\r\nfrom gluonts.trainer import Trainer\r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed)\r\n 261 try:\r\n--> 262 if issubclass(type_, val_type):\r\n 263 return validators\r\n\r\nTypeError: issubclass() arg 1 must be a class\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nRuntimeError Traceback (most recent call last)\r\n<ipython-input-13-1fca1cb620ad> in <module>\r\n----> 1 from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator\r\n 2 from gluonts.trainer import Trainer\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/model/simple_feedforward/__init__.py in <module>\r\n 1 # Relative imports\r\n----> 2 from ._estimator import SimpleFeedForwardEstimator\r\n 3 \r\n 4 __all__ = ['SimpleFeedForwardEstimator']\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/model/simple_feedforward/_estimator.py in <module>\r\n 7 # First-party imports\r\n 8 from gluonts.core.component import validated\r\n----> 9 from gluonts.distribution import DistributionOutput, StudentTOutput\r\n 10 from gluonts.model.estimator import GluonEstimator\r\n 11 from gluonts.model.predictor import Predictor, RepresentableBlockPredictor\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/__init__.py in <module>\r\n 14 # Relative imports\r\n 15 from . 
import bijection\r\n---> 16 from .binned import Binned, BinnedOutput\r\n 17 from .distribution import Distribution\r\n 18 from .distribution_output import DistributionOutput\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/binned.py in <module>\r\n 166 \r\n 167 \r\n--> 168 class BinnedOutput(DistributionOutput):\r\n 169 distr_cls: type = Binned\r\n 170 \r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/binned.py in BinnedOutput()\r\n 170 \r\n 171 @validated()\r\n--> 172 def __init__(self, bin_centers: List) -> None:\r\n 173 # cannot pass directly nd.array because it is not serializable\r\n 174 bc = mx.nd.array(bin_centers)\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/core/component.py in validator(ctor)\r\n 167 f'{ctor_clsnme}Model',\r\n 168 __config__=ConfigBase.Config,\r\n--> 169 **ctor_fields,\r\n 170 )\r\n 171 else:\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/main.py in create_model(model_name, __config__, __base__, **field_definitions)\r\n 408 annotation=f_annotation,\r\n 409 class_validators=vg.get_validators(f_name),\r\n--> 410 config=config,\r\n 411 )\r\n 412 \r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in infer(cls, name, value, annotation, class_validators, config)\r\n 105 required=required,\r\n 106 model_config=config,\r\n--> 107 schema=schema,\r\n 108 )\r\n 109 \r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in __init__(self, name, type_, class_validators, default, required, model_config, alias, allow_none, schema)\r\n 85 self.shape: Shape = Shape.SINGLETON\r\n 86 self._schema: Schema = schema\r\n---> 87 self.prepare()\r\n 88 \r\n 89 @classmethod\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in prepare(self)\r\n 134 \r\n 135 self._populate_sub_fields()\r\n--> 136 self._populate_validators()\r\n 137 \r\n 138 def schema(self, by_alias=True):\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in _populate_validators(self)\r\n 264 *tuple(v.func for v in self.class_validators if not v.whole and v.pre),\r\n 265 *(get_validators() if get_validators else find_validators(self.type_,\r\n--> 266 self.model_config.arbitrary_types_allowed)),\r\n 267 *tuple(v.func for v in self.class_validators if not v.whole and not v.pre),\r\n 268 )\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed)\r\n 263 return validators\r\n 264 except TypeError as e:\r\n--> 265 raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e\r\n 266 \r\n 267 if arbitrary_types_allowed:\r\n\r\nRuntimeError: error checking inheritance of ~T (type: T)\r\n```\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nfrom typing import Tuple, List\n\n# Third-party imports\nimport mxnet as mx\nfrom mxnet import gluon\n\n# First-party imports\nfrom gluonts.core.component import validated\nfrom gluonts.model.common import Tensor\n\n# Relative imports\nfrom .distribution import Distribution, _sample_multiple, getF\nfrom .distribution_output import DistributionOutput\n\n\nclass Binned(Distribution):\n r\"\"\"\n A binned distribution defined by a set of bins via\n bin centers and bin probabilities.\n\n Parameters\n ----------\n bin_probs\n Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.\n bin_centers\n Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:\n self.bin_centers = bin_centers\n self.bin_probs = bin_probs\n self.F = F if F else getF(bin_probs)\n\n self.bin_edges = Binned._compute_edges(self.F, bin_centers)\n\n @staticmethod\n def _compute_edges(F, bin_centers: Tensor) -> Tensor:\n r\"\"\"\n Computes the edges of the bins based on the centers. The first and last edge are set to :math:`10^{-10}` and\n :math:`10^{10}`, repsectively.\n\n Parameters\n ----------\n F\n bin_centers\n Tensor of shape `(*batch_shape, num_bins)`.\n\n Returns\n -------\n Tensor\n Tensor of shape (*gamma.shape, num_bins+1)\n \"\"\"\n\n low = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n - 1.0e10\n )\n high = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n + 1.0e10\n )\n\n means = (\n bin_centers.slice_axis(axis=-1, begin=1, end=None)\n + bin_centers.slice_axis(axis=-1, begin=0, end=-1)\n ) / 2.0\n\n return F.concat(low, means, high, dim=-1)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.bin_centers.shape[:-1]\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n @property\n def mean(self):\n return (self.bin_probs * self.bin_centers).sum(axis=-1)\n\n @property\n def stddev(self):\n Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)\n return (Ex2 - self.mean.square()).sqrt()\n\n def log_prob(self, x):\n x = x.expand_dims(axis=-1)\n # TODO: when mxnet has searchsorted replace this\n left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)\n right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)\n mask = self.F.broadcast_lesser_equal(\n left_edges, x\n ) * self.F.broadcast_lesser(x, right_edges)\n return (self.bin_probs.log() * mask).sum(axis=-1)\n\n def sample(self, num_samples=None):\n def s(bin_probs):\n F = self.F\n indices = F.sample_multinomial(bin_probs)\n if num_samples is None:\n return self.bin_centers.pick(indices, -1).reshape_like(\n F.zeros_like(indices.astype('float32'))\n )\n else:\n return F.repeat(\n F.expand_dims(self.bin_centers, axis=0),\n repeats=num_samples,\n axis=0,\n ).pick(indices, -1)\n\n return _sample_multiple(s, self.bin_probs, num_samples=num_samples)\n\n\nclass BinnedArgs(gluon.HybridBlock):\n def __init__(self, bin_centers: mx.nd.NDArray, **kwargs) -> None:\n super().__init__(**kwargs)\n with self.name_scope():\n self.bin_centers = self.params.get_constant(\n 'bincenters', bin_centers\n )\n self.num_bins = bin_centers.shape[0]\n\n # needs to be named self.proj for consistency with the ArgProj class and the inference tests\n self.proj = 
gluon.nn.HybridSequential()\n self.proj.add(\n gluon.nn.Dense(\n self.num_bins,\n prefix='binproj',\n flatten=False,\n weight_initializer=mx.init.Xavier(),\n )\n )\n self.proj.add(gluon.nn.HybridLambda('softmax'))\n\n def hybrid_forward(\n self, F, x: Tensor, bin_centers: Tensor, **kwargs\n ) -> Tuple[Tensor, Tensor]:\n ps = self.proj(x)\n return (\n ps.reshape(shape=(-2, -1, self.num_bins), reverse=1),\n # For some reason hybridize does not work when returning constants directly\n bin_centers + 0.0,\n )\n\n\nclass BinnedOutput(DistributionOutput):\n distr_cls: type = Binned\n\n @validated()\n def __init__(self, bin_centers: List) -> None:\n # cannot pass directly nd.array because it is not serializable\n bc = mx.nd.array(bin_centers)\n assert len(bc.shape) == 1\n self.bin_centers = bc\n\n def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:\n return BinnedArgs(self.bin_centers)\n\n def distribution(self, args, scale=None) -> Binned:\n probs, centers = args\n F = getF(probs)\n\n if scale is not None:\n centers = F.broadcast_mul(centers, scale).expand_dims(axis=-2)\n else:\n centers = F.broadcast_mul(\n centers, F.ones_like(probs.slice_axis(axis=-2, begin=0, end=1))\n )\n return Binned(probs, centers)\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n", "path": "src/gluonts/distribution/binned.py"}]} | 3,855 | 145 |
gh_patches_debug_19935 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using exclude paths for all instances of a Flask app
This is less a feature request than a question about how to use `OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS` to exclude routes from being traced for all instances of a Flask application (i.e. regardless of the host it is deployed to). I initially thought something like the following would work:
```python
from os import environ
environ["OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS"] = "/route1,/route2"
```
But it appears that this gets executed after the `Configuration` singleton is initialized, so the value never gets picked up. Calling `configuration.Configuration._reset()` after setting `environ` does make this work, but the docs state it's not meant for production code and it feels hacky as well.
Of course, setting the environment variable on the host being deployed to would solve this. The issue is that in some instances we may have a Flask application that has e.g. a ping route that should never be traced, and we may not have control over the environment variables of the host being deployed to (so the app has to somehow set the exclude paths itself). So I suppose my question is: can the application programmatically set its own exclude paths? Thanks in advance for the help.
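
A minimal sketch of the ordering problem described above (module and attribute names follow the code shown below; the import that first instantiates `Configuration` is illustrative):

```python
from os import environ

from opentelemetry.configuration import Configuration

Configuration()  # illustrative: in practice some earlier OpenTelemetry import builds the singleton

environ["OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS"] = "/route1,/route2"
print(Configuration().FLASK_EXCLUDED_HOSTS)  # None -- the env vars were read only at first instantiation
```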
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Simple configuration manager
17
18 This is a configuration manager for OpenTelemetry. It reads configuration
19 values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose
20 characters are only alphanumeric characters and underscores, except for the
21 first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.
22
23 For example, these environment variables will be read:
24
25 1. ``OPENTELEMETRY_PYTHON_SOMETHING``
26 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
27 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
28 4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``
29 5. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``
30
31 These won't:
32
33 1. ``OPENTELEMETRY_PYTH_SOMETHING``
34 2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``
35 3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
36
37 The values stored in the environment variables can be found in an instance of
38 ``opentelemetry.configuration.Configuration``. This class can be instantiated
39 freely because instantiating it returns always the same object.
40
41 For example, if the environment variable
42 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
43 ``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
44
45 Non defined attributes will always return ``None``. This is intended to make it
46 easier to use the ``Configuration`` object in actual code, because it won't be
47 necessary to check for the attribute to be defined first.
48
49 Environment variables used by OpenTelemetry
50 -------------------------------------------
51
52 1. OPENTELEMETRY_PYTHON_METER_PROVIDER
53 2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER
54
55 The value of these environment variables should be the name of the entry point
56 that points to the class that implements either provider. This OpenTelemetry
57 API package provides one entry point for each, which can be found in the
58 setup.py file::
59
60 entry_points={
61 ...
62 "opentelemetry_meter_provider": [
63 "default_meter_provider = "
64 "opentelemetry.metrics:DefaultMeterProvider"
65 ],
66 "opentelemetry_tracer_provider": [
67 "default_tracer_provider = "
68 "opentelemetry.trace:DefaultTracerProvider"
69 ],
70 }
71
72 To use the meter provider above, then the
73 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to
74 ``"default_meter_provider"`` (this is not actually necessary since the
75 OpenTelemetry API provided providers are the default ones used if no
76 configuration is found in the environment variables).
77
78 Configuration values that are exactly ``"True"`` or ``"False"`` will be
79 converted to its boolean values of ``True`` and ``False`` respectively.
80
81 Configuration values that can be cast to integers or floats will be cast.
82
83 This object can be used by any OpenTelemetry component, native or external.
84 For that reason, the ``Configuration`` object is designed to be immutable.
85 If a component would change the value of one of the ``Configuration`` object
86 attributes then another component that relied on that value may break, leading
87 to bugs that are very hard to debug. To avoid this situation, the preferred
88 approach for components that need a different value than the one provided by
89 the ``Configuration`` object is to implement a mechanism that allows the user
90 to override this value instead of changing it.
91 """
92
93 from os import environ
94 from re import fullmatch
95 from typing import ClassVar, Dict, Optional, TypeVar, Union
96
97 ConfigValue = Union[str, bool, int, float]
98 _T = TypeVar("_T", ConfigValue, Optional[ConfigValue])
99
100
101 class Configuration:
102 _instance = None # type: ClassVar[Optional[Configuration]]
103 _config_map = {} # type: ClassVar[Dict[str, ConfigValue]]
104
105 def __new__(cls) -> "Configuration":
106 if cls._instance is not None:
107 instance = cls._instance
108 else:
109
110 instance = super().__new__(cls)
111 for key, value_str in environ.items():
112
113 match = fullmatch(
114 r"OPENTELEMETRY_PYTHON_([A-Za-z_][\w_]*)", key
115 )
116
117 if match is not None:
118
119 key = match.group(1)
120 value = value_str # type: ConfigValue
121
122 if value_str == "True":
123 value = True
124 elif value_str == "False":
125 value = False
126 else:
127 try:
128 value = int(value_str)
129 except ValueError:
130 pass
131 try:
132 value = float(value_str)
133 except ValueError:
134 pass
135
136 instance._config_map[key] = value
137
138 cls._instance = instance
139
140 return instance
141
142 def __getattr__(self, name: str) -> Optional[ConfigValue]:
143 return self._config_map.get(name)
144
145 def __setattr__(self, key: str, val: ConfigValue) -> None:
146 if key == "_config_map":
147 super().__setattr__(key, val)
148 else:
149 raise AttributeError(key)
150
151 def get(self, name: str, default: _T) -> _T:
152 """Use this typed method for dynamic access instead of `getattr`
153
154 :rtype: str or bool or int or float or None
155 """
156 val = self._config_map.get(name, default)
157 return val
158
159 @classmethod
160 def _reset(cls) -> None:
161 """
162 This method "resets" the global configuration attributes
163
164 It is not intended to be used by production code but by testing code
165 only.
166 """
167
168 if cls._instance:
169 cls._instance._config_map.clear() # pylint: disable=protected-access
170 cls._instance = None
171
[end of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
@@ -142,19 +142,18 @@
def __getattr__(self, name: str) -> Optional[ConfigValue]:
return self._config_map.get(name)
- def __setattr__(self, key: str, val: ConfigValue) -> None:
- if key == "_config_map":
- super().__setattr__(key, val)
+ def __setattr__(self, name: str, value: ConfigValue) -> None:
+ if name not in self._config_map.keys():
+ self._config_map[name] = value
else:
- raise AttributeError(key)
+ raise AttributeError(name)
def get(self, name: str, default: _T) -> _T:
"""Use this typed method for dynamic access instead of `getattr`
:rtype: str or bool or int or float or None
"""
- val = self._config_map.get(name, default)
- return val
+ return self._config_map.get(name, default)
@classmethod
def _reset(cls) -> None:
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n@@ -142,19 +142,18 @@\n def __getattr__(self, name: str) -> Optional[ConfigValue]:\n return self._config_map.get(name)\n \n- def __setattr__(self, key: str, val: ConfigValue) -> None:\n- if key == \"_config_map\":\n- super().__setattr__(key, val)\n+ def __setattr__(self, name: str, value: ConfigValue) -> None:\n+ if name not in self._config_map.keys():\n+ self._config_map[name] = value\n else:\n- raise AttributeError(key)\n+ raise AttributeError(name)\n \n def get(self, name: str, default: _T) -> _T:\n \"\"\"Use this typed method for dynamic access instead of `getattr`\n \n :rtype: str or bool or int or float or None\n \"\"\"\n- val = self._config_map.get(name, default)\n- return val\n+ return self._config_map.get(name, default)\n \n @classmethod\n def _reset(cls) -> None:\n", "issue": "Using exclude paths for all instances of a Flask app\nThis is less a feature request than a question about how to use `OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS` to exclude routes from being traced for all instances of a flask application (i.e. regardless of host being deployed to). I initially thought something like below would work -\r\n\r\n```python\r\nfrom os import environ\r\nenviron[\"OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS\"] = \"/route1,/route2\"\r\n```\r\n\r\nBut it appears that this would be executed after the `Configuration` singleton gets initialized, and won't get picked up. Calling `configuration.Configuration._reset()` after setting `environ` seems like a hack to make this work but the docs state it's not for production code and it feels hacky as well.\r\n\r\nOf course setting the environment on the deployed to host would solve this. The issue is that in some instances we may have a flask application that has e.g. a ping route that should never be traced, and we may not have control over the environment variables of the host being deployed to (so the app has to somehow set the exclude paths). So I suppose my question is, can the application programmatically set it's own exclude paths? Thanks in advance for the help.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. 
``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n5. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n``\"default_meter_provider\"`` (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\nConfiguration values that are exactly ``\"True\"`` or ``\"False\"`` will be\nconverted to its boolean values of ``True`` and ``False`` respectively.\n\nConfiguration values that can be casted to integers or floats will be casted.\n\nThis object can be used by any OpenTelemetry component, native or external.\nFor that reason, the ``Configuration`` object is designed to be immutable.\nIf a component would change the value of one of the ``Configuration`` object\nattributes then another component that relied on that value may break, leading\nto bugs that are very hard to debug. 
To avoid this situation, the preferred\napproach for components that need a different value than the one provided by\nthe ``Configuration`` object is to implement a mechanism that allows the user\nto override this value instead of changing it.\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\nfrom typing import ClassVar, Dict, Optional, TypeVar, Union\n\nConfigValue = Union[str, bool, int, float]\n_T = TypeVar(\"_T\", ConfigValue, Optional[ConfigValue])\n\n\nclass Configuration:\n _instance = None # type: ClassVar[Optional[Configuration]]\n _config_map = {} # type: ClassVar[Dict[str, ConfigValue]]\n\n def __new__(cls) -> \"Configuration\":\n if cls._instance is not None:\n instance = cls._instance\n else:\n\n instance = super().__new__(cls)\n for key, value_str in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n value = value_str # type: ConfigValue\n\n if value_str == \"True\":\n value = True\n elif value_str == \"False\":\n value = False\n else:\n try:\n value = int(value_str)\n except ValueError:\n pass\n try:\n value = float(value_str)\n except ValueError:\n pass\n\n instance._config_map[key] = value\n\n cls._instance = instance\n\n return instance\n\n def __getattr__(self, name: str) -> Optional[ConfigValue]:\n return self._config_map.get(name)\n\n def __setattr__(self, key: str, val: ConfigValue) -> None:\n if key == \"_config_map\":\n super().__setattr__(key, val)\n else:\n raise AttributeError(key)\n\n def get(self, name: str, default: _T) -> _T:\n \"\"\"Use this typed method for dynamic access instead of `getattr`\n\n :rtype: str or bool or int or float or None\n \"\"\"\n val = self._config_map.get(name, default)\n return val\n\n @classmethod\n def _reset(cls) -> None:\n \"\"\"\n This method \"resets\" the global configuration attributes\n\n It is not intended to be used by production code but by testing code\n only.\n \"\"\"\n\n if cls._instance:\n cls._instance._config_map.clear() # pylint: disable=protected-access\n cls._instance = None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}]} | 2,647 | 298 |
gh_patches_debug_1686 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1064 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for Django 3.2
Django 3.2 is slated for a release in April. Running the test suite, a few problems came up:
- [ ] App label needs to be a valid Python identifier, ours is not (renaming it from `elasticapm.contrib.django` to `elasticapm` should suffice)
Several test failures:
- [ ] `test_broken_500_handler_with_middleware`
- [ ] `test_404_middleware`
- [ ] `test_response_error_id_middleware`
- [ ] `test_django_logging_request_kwarg`
- [ ] `test_django_logging_middleware`
- [ ] `test_capture_body_config_is_dynamic_for_transactions`
- [ ] `test_capture_headers_config_is_dynamic_for_transactions`
- [ ] `test_capture_headers`
- [ ] `test_transaction_name_from_route`
Most of these look similar in nature; I suspect an issue with middlewares. Nothing jumps out in the [release notes](https://docs.djangoproject.com/en/3.2/releases/3.2/), though.
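
For the app-label item above, a sketch of what the rename would look like on the `AppConfig` subclass shown in the code below (the new label value follows the issue author's suggestion and is an assumption, not a verified fix):

```python
from django.apps import AppConfig


class ElasticAPMConfig(AppConfig):
    name = "elasticapm.contrib.django"
    label = "elasticapm"  # Django 3.2 requires app labels to be valid Python identifiers
    verbose_name = "ElasticAPM"
```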
</issue>
<code>
[start of elasticapm/contrib/django/apps.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from functools import partial
32
33 from django.apps import AppConfig
34 from django.conf import settings as django_settings
35
36 from elasticapm.conf import constants
37 from elasticapm.contrib.django.client import get_client
38 from elasticapm.utils.disttracing import TraceParent
39 from elasticapm.utils.logging import get_logger
40 from elasticapm.utils.wsgi import get_current_url
41
42 logger = get_logger("elasticapm.traces")
43
44 ERROR_DISPATCH_UID = "elasticapm-exceptions"
45 REQUEST_START_DISPATCH_UID = "elasticapm-request-start"
46 REQUEST_FINISH_DISPATCH_UID = "elasticapm-request-stop"
47
48 MIDDLEWARE_NAME = "elasticapm.contrib.django.middleware.TracingMiddleware"
49
50 TRACEPARENT_HEADER_NAME_WSGI = "HTTP_" + constants.TRACEPARENT_HEADER_NAME.upper().replace("-", "_")
51 TRACEPARENT_LEGACY_HEADER_NAME_WSGI = "HTTP_" + constants.TRACEPARENT_LEGACY_HEADER_NAME.upper().replace("-", "_")
52 TRACESTATE_HEADER_NAME_WSGI = "HTTP_" + constants.TRACESTATE_HEADER_NAME.upper().replace("-", "_")
53
54
55 class ElasticAPMConfig(AppConfig):
56 name = "elasticapm.contrib.django"
57 label = "elasticapm.contrib.django"
58 verbose_name = "ElasticAPM"
59
60 def __init__(self, *args, **kwargs):
61 super(ElasticAPMConfig, self).__init__(*args, **kwargs)
62 self.client = None
63
64 def ready(self):
65 self.client = get_client()
66 if self.client.config.autoinsert_django_middleware:
67 self.insert_middleware(django_settings)
68 register_handlers(self.client)
69 if self.client.config.instrument and self.client.config.enabled:
70 instrument(self.client)
71 else:
72 self.client.logger.debug("Skipping instrumentation. INSTRUMENT is set to False.")
73
74 @staticmethod
75 def insert_middleware(settings):
76 if hasattr(settings, "MIDDLEWARE"):
77 middleware_list = settings.MIDDLEWARE
78 middleware_attr = "MIDDLEWARE"
79 elif hasattr(settings, "MIDDLEWARE_CLASSES"): # can be removed when we drop support for Django 1.x
80 middleware_list = settings.MIDDLEWARE_CLASSES
81 middleware_attr = "MIDDLEWARE_CLASSES"
82 else:
83 logger.debug("Could not find middleware setting, not autoinserting tracing middleware")
84 return
85 is_tuple = isinstance(middleware_list, tuple)
86 if is_tuple:
87 middleware_list = list(middleware_list)
88 elif not isinstance(middleware_list, list):
89 logger.debug("%s setting is not of type list or tuple, not autoinserting tracing middleware")
90 return
91 if middleware_list is not None and MIDDLEWARE_NAME not in middleware_list:
92 logger.debug("Inserting tracing middleware into settings.%s", middleware_attr)
93 middleware_list.insert(0, MIDDLEWARE_NAME)
94 if is_tuple:
95 middleware_list = tuple(middleware_list)
96 if middleware_list:
97 setattr(settings, middleware_attr, middleware_list)
98
99
100 def register_handlers(client):
101 from django.core.signals import got_request_exception, request_finished, request_started
102
103 from elasticapm.contrib.django.handlers import exception_handler
104
105 # Connect to Django's internal signal handlers
106 got_request_exception.disconnect(dispatch_uid=ERROR_DISPATCH_UID)
107 got_request_exception.connect(partial(exception_handler, client), dispatch_uid=ERROR_DISPATCH_UID, weak=False)
108
109 request_started.disconnect(dispatch_uid=REQUEST_START_DISPATCH_UID)
110 request_started.connect(
111 partial(_request_started_handler, client), dispatch_uid=REQUEST_START_DISPATCH_UID, weak=False
112 )
113
114 request_finished.disconnect(dispatch_uid=REQUEST_FINISH_DISPATCH_UID)
115 request_finished.connect(
116 lambda sender, **kwargs: client.end_transaction() if _should_start_transaction(client) else None,
117 dispatch_uid=REQUEST_FINISH_DISPATCH_UID,
118 weak=False,
119 )
120
121 # If we can import celery, register ourselves as exception handler
122 try:
123 import celery # noqa F401
124
125 from elasticapm.contrib.celery import register_exception_tracking
126
127 try:
128 register_exception_tracking(client)
129 except Exception as e:
130 client.logger.exception("Failed installing django-celery hook: %s" % e)
131 except ImportError:
132 client.logger.debug("Not instrumenting Celery, couldn't import")
133
134
135 def _request_started_handler(client, sender, *args, **kwargs):
136 if not _should_start_transaction(client):
137 return
138 # try to find trace id
139 trace_parent = None
140 if "environ" in kwargs:
141 url = get_current_url(kwargs["environ"], strip_querystring=True, path_only=True)
142 if client.should_ignore_url(url):
143 logger.debug("Ignoring request due to %s matching transaction_ignore_urls")
144 return
145 trace_parent = TraceParent.from_headers(
146 kwargs["environ"],
147 TRACEPARENT_HEADER_NAME_WSGI,
148 TRACEPARENT_LEGACY_HEADER_NAME_WSGI,
149 TRACESTATE_HEADER_NAME_WSGI,
150 )
151 elif "scope" in kwargs:
152 scope = kwargs["scope"]
153 fake_environ = {"SCRIPT_NAME": scope.get("root_path", ""), "PATH_INFO": scope["path"], "QUERY_STRING": ""}
154 url = get_current_url(fake_environ, strip_querystring=True, path_only=True)
155 if client.should_ignore_url(url):
156 logger.debug("Ignoring request due to %s matching transaction_ignore_urls")
157 return
158 if "headers" in scope:
159 trace_parent = TraceParent.from_headers(scope["headers"])
160 client.begin_transaction("request", trace_parent=trace_parent)
161
162
163 def instrument(client):
164 """
165 Auto-instruments code to get nice spans
166 """
167 from elasticapm.instrumentation.control import instrument
168
169 instrument()
170 try:
171 import celery # noqa F401
172
173 from elasticapm.contrib.celery import register_instrumentation
174
175 register_instrumentation(client)
176 except ImportError:
177 client.logger.debug("Not instrumenting Celery, couldn't import")
178
179
180 def _should_start_transaction(client):
181 middleware_attr = "MIDDLEWARE" if getattr(django_settings, "MIDDLEWARE", None) is not None else "MIDDLEWARE_CLASSES"
182 middleware = getattr(django_settings, middleware_attr)
183 return (
184 (not django_settings.DEBUG or client.config.debug)
185 and middleware
186 and "elasticapm.contrib.django.middleware.TracingMiddleware" in middleware
187 )
188
[end of elasticapm/contrib/django/apps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/contrib/django/apps.py b/elasticapm/contrib/django/apps.py
--- a/elasticapm/contrib/django/apps.py
+++ b/elasticapm/contrib/django/apps.py
@@ -54,7 +54,7 @@
class ElasticAPMConfig(AppConfig):
name = "elasticapm.contrib.django"
- label = "elasticapm.contrib.django"
+ label = "elasticapm"
verbose_name = "ElasticAPM"
def __init__(self, *args, **kwargs):
| {"golden_diff": "diff --git a/elasticapm/contrib/django/apps.py b/elasticapm/contrib/django/apps.py\n--- a/elasticapm/contrib/django/apps.py\n+++ b/elasticapm/contrib/django/apps.py\n@@ -54,7 +54,7 @@\n \n class ElasticAPMConfig(AppConfig):\n name = \"elasticapm.contrib.django\"\n- label = \"elasticapm.contrib.django\"\n+ label = \"elasticapm\"\n verbose_name = \"ElasticAPM\"\n \n def __init__(self, *args, **kwargs):\n", "issue": "Add support for Django 3.2\nDjango 3.2 is slated for a release in April. Running the test suite, a few problems came up:\r\n\r\n- [ ] App label needs to be a valid Python identifier, ours is not (renaming it from `elasticapm.contrib.django` to `elasticapm` should suffice)\r\n\r\nSeveral test failures:\r\n\r\n- [ ] `test_broken_500_handler_with_middleware`\r\n- [ ] `test_404_middleware`\r\n- [ ] `test_response_error_id_middleware`\r\n- [ ] `test_django_logging_request_kwarg`\r\n- [ ] `test_django_logging_middleware`\r\n- [ ] `test_capture_body_config_is_dynamic_for_transactions`\r\n- [ ] `test_capture_headers_config_is_dynamic_for_transactions`\r\n- [ ] `test_capture_headers`\r\n- [ ] `test_transaction_name_from_route`\r\n\r\nMost of these look similar in nature, I suspect an issue with middlewares. Nothing jumps out in the [release notes](https://docs.djangoproject.com/en/3.2/releases/3.2/), though.\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom functools import partial\n\nfrom django.apps import AppConfig\nfrom django.conf import settings as django_settings\n\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.django.client import get_client\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.wsgi import get_current_url\n\nlogger = get_logger(\"elasticapm.traces\")\n\nERROR_DISPATCH_UID = \"elasticapm-exceptions\"\nREQUEST_START_DISPATCH_UID = \"elasticapm-request-start\"\nREQUEST_FINISH_DISPATCH_UID = \"elasticapm-request-stop\"\n\nMIDDLEWARE_NAME = \"elasticapm.contrib.django.middleware.TracingMiddleware\"\n\nTRACEPARENT_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACEPARENT_LEGACY_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_LEGACY_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACESTATE_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACESTATE_HEADER_NAME.upper().replace(\"-\", \"_\")\n\n\nclass ElasticAPMConfig(AppConfig):\n name = \"elasticapm.contrib.django\"\n label = \"elasticapm.contrib.django\"\n verbose_name = \"ElasticAPM\"\n\n def __init__(self, *args, **kwargs):\n super(ElasticAPMConfig, self).__init__(*args, **kwargs)\n self.client = None\n\n def ready(self):\n self.client = get_client()\n if self.client.config.autoinsert_django_middleware:\n self.insert_middleware(django_settings)\n register_handlers(self.client)\n if self.client.config.instrument and self.client.config.enabled:\n instrument(self.client)\n else:\n self.client.logger.debug(\"Skipping instrumentation. 
INSTRUMENT is set to False.\")\n\n @staticmethod\n def insert_middleware(settings):\n if hasattr(settings, \"MIDDLEWARE\"):\n middleware_list = settings.MIDDLEWARE\n middleware_attr = \"MIDDLEWARE\"\n elif hasattr(settings, \"MIDDLEWARE_CLASSES\"): # can be removed when we drop support for Django 1.x\n middleware_list = settings.MIDDLEWARE_CLASSES\n middleware_attr = \"MIDDLEWARE_CLASSES\"\n else:\n logger.debug(\"Could not find middleware setting, not autoinserting tracing middleware\")\n return\n is_tuple = isinstance(middleware_list, tuple)\n if is_tuple:\n middleware_list = list(middleware_list)\n elif not isinstance(middleware_list, list):\n logger.debug(\"%s setting is not of type list or tuple, not autoinserting tracing middleware\")\n return\n if middleware_list is not None and MIDDLEWARE_NAME not in middleware_list:\n logger.debug(\"Inserting tracing middleware into settings.%s\", middleware_attr)\n middleware_list.insert(0, MIDDLEWARE_NAME)\n if is_tuple:\n middleware_list = tuple(middleware_list)\n if middleware_list:\n setattr(settings, middleware_attr, middleware_list)\n\n\ndef register_handlers(client):\n from django.core.signals import got_request_exception, request_finished, request_started\n\n from elasticapm.contrib.django.handlers import exception_handler\n\n # Connect to Django's internal signal handlers\n got_request_exception.disconnect(dispatch_uid=ERROR_DISPATCH_UID)\n got_request_exception.connect(partial(exception_handler, client), dispatch_uid=ERROR_DISPATCH_UID, weak=False)\n\n request_started.disconnect(dispatch_uid=REQUEST_START_DISPATCH_UID)\n request_started.connect(\n partial(_request_started_handler, client), dispatch_uid=REQUEST_START_DISPATCH_UID, weak=False\n )\n\n request_finished.disconnect(dispatch_uid=REQUEST_FINISH_DISPATCH_UID)\n request_finished.connect(\n lambda sender, **kwargs: client.end_transaction() if _should_start_transaction(client) else None,\n dispatch_uid=REQUEST_FINISH_DISPATCH_UID,\n weak=False,\n )\n\n # If we can import celery, register ourselves as exception handler\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_exception_tracking\n\n try:\n register_exception_tracking(client)\n except Exception as e:\n client.logger.exception(\"Failed installing django-celery hook: %s\" % e)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _request_started_handler(client, sender, *args, **kwargs):\n if not _should_start_transaction(client):\n return\n # try to find trace id\n trace_parent = None\n if \"environ\" in kwargs:\n url = get_current_url(kwargs[\"environ\"], strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n trace_parent = TraceParent.from_headers(\n kwargs[\"environ\"],\n TRACEPARENT_HEADER_NAME_WSGI,\n TRACEPARENT_LEGACY_HEADER_NAME_WSGI,\n TRACESTATE_HEADER_NAME_WSGI,\n )\n elif \"scope\" in kwargs:\n scope = kwargs[\"scope\"]\n fake_environ = {\"SCRIPT_NAME\": scope.get(\"root_path\", \"\"), \"PATH_INFO\": scope[\"path\"], \"QUERY_STRING\": \"\"}\n url = get_current_url(fake_environ, strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n if \"headers\" in scope:\n trace_parent = TraceParent.from_headers(scope[\"headers\"])\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n\ndef instrument(client):\n 
\"\"\"\n Auto-instruments code to get nice spans\n \"\"\"\n from elasticapm.instrumentation.control import instrument\n\n instrument()\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_instrumentation\n\n register_instrumentation(client)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _should_start_transaction(client):\n middleware_attr = \"MIDDLEWARE\" if getattr(django_settings, \"MIDDLEWARE\", None) is not None else \"MIDDLEWARE_CLASSES\"\n middleware = getattr(django_settings, middleware_attr)\n return (\n (not django_settings.DEBUG or client.config.debug)\n and middleware\n and \"elasticapm.contrib.django.middleware.TracingMiddleware\" in middleware\n )\n", "path": "elasticapm/contrib/django/apps.py"}]} | 2,916 | 129 |
gh_patches_debug_17245 | rasdani/github-patches | git_diff | mars-project__mars-2683 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Optimization that compacts multiple filters into `eval` generates unexpected node in graph
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
The optimization that compacts multiple filters into `eval` generates an unexpected node in the graph.
**To Reproduce**
To help us reproduce this bug, please provide the information below:
1. Your Python version
2. The version of Mars you use
3. Versions of crucial packages, such as numpy, scipy and pandas
4. Full stack of the error.
5. Minimized code to reproduce the error.
```python
@enter_mode(build=True)
def test_arithmetic_query(setup):
df1 = md.DataFrame(raw, chunk_size=10)
df2 = md.DataFrame(raw2, chunk_size=10)
df3 = df1.merge(df2, on='A', suffixes=('', '_'))
df3['K'] = df4 = df3["A"] * (1 - df3["B"])
graph = TileableGraph([df3.data])
next(TileableGraphBuilder(graph).build())
records = optimize(graph)
opt_df4 = records.get_optimization_result(df4.data)
assert opt_df4.op.expr == "(`A`) * ((1) - (`B`))"
assert len(graph) == 5 # for now len(graph) is 6
assert len([n for n in graph if isinstance(n.op, DataFrameEval)]) == 1 # and 2 evals exist
```
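A side note on the mechanics: the extra node suggests a predecessor that should have been collapsed survives in the graph, and the accompanying fix (the diff further down this entry) tightens the fallbacks in `_remove_collapsable_predecessors` from `... or pred` to explicit `None` checks. The difference matters because `or` substitutes the default for *any* falsy value, not only for a missing record; roughly:
```python
# Falls back to `pred` whenever the lookup result is falsy (for example an
# entity whose __bool__/__len__ evaluates to False), even if a record exists.
pred_opt = records.get_optimization_result(pred) or pred

# Falls back only when the lookup really returned None.
pred_opt = records.get_optimization_result(pred)
pred_opt = pred_opt if pred_opt is not None else pred
```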
</issue>
<code>
[start of mars/optimization/logical/core.py]
1 # Copyright 1999-2021 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import weakref
16 from abc import ABC, abstractmethod
17 from collections import defaultdict
18 from dataclasses import dataclass
19 from enum import Enum
20 from typing import Dict, List, Tuple, Type
21
22 from ...core import OperandType, ChunkType, EntityType, enter_mode
23 from ...core.graph import EntityGraph
24 from ...core.operand import Operand
25
26
27 class OptimizationRecordType(Enum):
28 replace = 0
29 new = 1
30 delete = 2
31
32
33 @dataclass
34 class OptimizationRecord:
35 original_chunk: ChunkType = None
36 new_chunk: ChunkType = None
37 record_type: OptimizationRecordType = None
38
39
40 class OptimizationRecords:
41 _records: List[OptimizationRecord]
42 _original_chunk_to_records: Dict[ChunkType, OptimizationRecord]
43
44 def __init__(self):
45 self._records = list()
46 self._original_chunk_to_records = dict()
47 self._optimized_chunk_to_records = dict()
48
49 def append_record(self, record: OptimizationRecord):
50 self._records.append(record)
51 if record.record_type in (
52 OptimizationRecordType.replace,
53 OptimizationRecordType.delete,
54 ):
55 self._original_chunk_to_records[record.original_chunk] = record
56 if record.record_type in (
57 OptimizationRecordType.new,
58 OptimizationRecordType.replace,
59 ):
60 self._optimized_chunk_to_records[record.new_chunk] = record
61
62 def get_optimization_result(self, original_chunk: ChunkType) -> ChunkType:
63 chunk = original_chunk
64 if chunk not in self._original_chunk_to_records:
65 return
66 while chunk in self._original_chunk_to_records:
67 record = self._original_chunk_to_records[chunk]
68 if record.record_type == OptimizationRecordType.replace:
69 chunk = record.new_chunk
70 else:
71 assert record.record_type == OptimizationRecordType.delete
72 return None
73 return chunk
74
75 def get_original_chunk(self, optimized_chunk: ChunkType) -> ChunkType:
76 chunk = optimized_chunk
77 if chunk not in self._optimized_chunk_to_records:
78 return
79 while chunk in self._optimized_chunk_to_records:
80 record = self._optimized_chunk_to_records[chunk]
81 if record.record_type == OptimizationRecordType.replace:
82 chunk = record.original_chunk
83 else:
84 assert record.record_type == OptimizationRecordType.new
85 return None
86 return chunk
87
88
89 class OptimizationRule(ABC):
90 _instances: Dict[
91 Tuple[Type["OptimizationRule"], EntityGraph, OptimizationRecords],
92 "OptimizationRule",
93 ] = dict()
94 _preds_to_remove = weakref.WeakKeyDictionary()
95
96 def __init__(
97 self,
98 graph: EntityGraph,
99 records: OptimizationRecords,
100 optimizer_cls: Type["Optimizer"],
101 ):
102 self._graph = graph
103 self._records = records
104 self._optimizer_cls = optimizer_cls
105
106 def __new__(
107 cls,
108 graph: EntityGraph,
109 records: OptimizationRecords,
110 optimizer_cls: Type["Optimizer"],
111 ):
112 if (cls, graph, records) in cls._instances:
113 return cls._instances[cls, graph, records]
114 inst = cls._instances[cls, graph, records] = object.__new__(cls)
115 return inst
116
117 @abstractmethod
118 def match(self, op: OperandType) -> bool:
119 """
120 If this operand matches this rule.
121
122 Parameters
123 ----------
124 op : OperandType
125 Operand.
126
127 Returns
128 -------
129 matched : bool
130 Matched rule or not.
131 """
132
133 @abstractmethod
134 def apply(self, op: OperandType):
135 """
136 Apply rule to an operand.
137
138 Parameters
139 ----------
140 op : OperandType
141 Operand
142 """
143
144 def _replace_node(self, original_node: EntityType, new_node: EntityType):
145 predecessors = self._graph.predecessors(original_node)
146 successors = self._graph.successors(original_node)
147 self._graph.remove_node(original_node)
148 self._graph.add_node(new_node)
149 for pred in predecessors:
150 self._graph.add_edge(pred, new_node)
151 for succ in successors:
152 self._graph.add_edge(new_node, succ)
153
154 @classmethod
155 def _add_collapsable_predecessor(cls, node: EntityType, predecessor: EntityType):
156 if predecessor not in cls._preds_to_remove:
157 cls._preds_to_remove[predecessor] = {node}
158 else:
159 cls._preds_to_remove[predecessor].add(node)
160
161 def _remove_collapsable_predecessors(self, node: EntityType):
162 node = self._records.get_optimization_result(node) or node
163 preds_opt_to_remove = []
164 for pred in self._graph.predecessors(node):
165 pred_original = self._records.get_original_chunk(pred) or pred
166 pred_opt = self._records.get_optimization_result(pred) or pred
167 if pred_opt in self._graph.results or pred_original in self._graph.results:
168 continue
169 affect_succ = self._preds_to_remove.get(pred_original) or []
170 affect_succ_opt = [
171 self._records.get_optimization_result(s) or s for s in affect_succ
172 ]
173 if all(s in affect_succ_opt for s in self._graph.successors(pred)):
174 preds_opt_to_remove.append((pred_original, pred_opt))
175
176 for pred_original, pred_opt in preds_opt_to_remove:
177 self._graph.remove_node(pred_opt)
178 self._records.append_record(
179 OptimizationRecord(pred_original, None, OptimizationRecordType.delete)
180 )
181
182
183 class Optimizer(ABC):
184 _rules: List[Type[OptimizationRule]]
185 _op_to_rules: Dict[Type[OperandType], List[Type[OptimizationRule]]]
186
187 @classmethod
188 def register_rule(
189 cls, operand_types: List[Type[OperandType]], rule: Type[OptimizationRule]
190 ):
191 if not hasattr(cls, "_rules"):
192 cls._rules = []
193 cls._rules.append(rule)
194
195 if not hasattr(cls, "_op_to_rules"):
196 cls._op_to_rules = defaultdict(list)
197 for operand_type in operand_types:
198 cls._op_to_rules[operand_type].append(rule)
199
200 @classmethod
201 def get_rule_types(
202 cls, operand_type: Type[OperandType]
203 ) -> List[Type[OptimizationRule]]:
204 rule_types = cls._op_to_rules.get(operand_type, None)
205 if rule_types is None:
206 for op_cls in operand_type.__mro__:
207 if op_cls is Operand:
208 break
209 rule_types = cls._op_to_rules.get(op_cls)
210 if rule_types is not None:
211 break
212 cls._op_to_rules[operand_type] = rule_types or []
213 return rule_types
214
215 @classmethod
216 def _replace_inputs(cls, graph: EntityGraph, records: OptimizationRecords):
217 for node in graph:
218 for succ in graph.successors(node):
219 input_optimized = False
220 new_inputs = []
221 for inp in succ.inputs:
222 optimized = records.get_optimization_result(inp)
223 if optimized is None:
224 optimized = inp
225 if optimized is not inp:
226 input_optimized = True
227 new_inputs.append(optimized)
228 if input_optimized:
229 succ.inputs = new_inputs
230
231 @classmethod
232 @enter_mode(build=True)
233 def optimize(cls, graph: EntityGraph) -> OptimizationRecords:
234 """
235 Optimize a graph.
236
237 Parameters
238 ----------
239 graph : EntityGraph
240 Tileable or chunk graph.
241
242 Returns
243 -------
244 optimization_records : OptimizationRecords
245 Optimization records.
246 """
247 records = OptimizationRecords()
248 optimized = False
249 for rule_type in cls._rules:
250 visited = set()
251 for entity in list(graph.topological_iter()):
252 op = entity.op
253 if op in visited:
254 continue
255 visited.add(op)
256
257 rule_types = cls.get_rule_types(type(op)) or []
258 if rule_type not in rule_types:
259 continue
260
261 rule = rule_type(graph, records, cls)
262 if entity not in graph: # pragma: no cover
263 # maybe removed during optimization
264 continue
265 if rule.match(op):
266 optimized = True
267 rule.apply(op)
268 if optimized:
269 cls._replace_inputs(graph, records)
270 return records
271
[end of mars/optimization/logical/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mars/optimization/logical/core.py b/mars/optimization/logical/core.py
--- a/mars/optimization/logical/core.py
+++ b/mars/optimization/logical/core.py
@@ -162,8 +162,12 @@
node = self._records.get_optimization_result(node) or node
preds_opt_to_remove = []
for pred in self._graph.predecessors(node):
- pred_original = self._records.get_original_chunk(pred) or pred
- pred_opt = self._records.get_optimization_result(pred) or pred
+ pred_original = self._records.get_original_chunk(pred)
+ pred_original = pred_original if pred_original is not None else pred
+
+ pred_opt = self._records.get_optimization_result(pred)
+ pred_opt = pred_opt if pred_opt is not None else pred
+
if pred_opt in self._graph.results or pred_original in self._graph.results:
continue
affect_succ = self._preds_to_remove.get(pred_original) or []
| {"golden_diff": "diff --git a/mars/optimization/logical/core.py b/mars/optimization/logical/core.py\n--- a/mars/optimization/logical/core.py\n+++ b/mars/optimization/logical/core.py\n@@ -162,8 +162,12 @@\n node = self._records.get_optimization_result(node) or node\n preds_opt_to_remove = []\n for pred in self._graph.predecessors(node):\n- pred_original = self._records.get_original_chunk(pred) or pred\n- pred_opt = self._records.get_optimization_result(pred) or pred\n+ pred_original = self._records.get_original_chunk(pred)\n+ pred_original = pred_original if pred_original is not None else pred\n+\n+ pred_opt = self._records.get_optimization_result(pred)\n+ pred_opt = pred_opt if pred_opt is not None else pred\n+\n if pred_opt in self._graph.results or pred_original in self._graph.results:\n continue\n affect_succ = self._preds_to_remove.get(pred_original) or []\n", "issue": "[BUG] Optimization that compacts multiple filters into `eval` generates unexpected node in graph\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\n\r\nOptimization that compacts multiple filters into eval generates unexpected node in graph.\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version\r\n2. The version of Mars you use\r\n3. Versions of crucial packages, such as numpy, scipy and pandas\r\n4. Full stack of the error.\r\n5. Minimized code to reproduce the error.\r\n\r\n```python\r\n@enter_mode(build=True)\r\ndef test_arithmetic_query(setup):\r\n df1 = md.DataFrame(raw, chunk_size=10)\r\n df2 = md.DataFrame(raw2, chunk_size=10)\r\n df3 = df1.merge(df2, on='A', suffixes=('', '_'))\r\n df3['K'] = df4 = df3[\"A\"] * (1 - df3[\"B\"])\r\n graph = TileableGraph([df3.data])\r\n next(TileableGraphBuilder(graph).build())\r\n records = optimize(graph)\r\n opt_df4 = records.get_optimization_result(df4.data)\r\n assert opt_df4.op.expr == \"(`A`) * ((1) - (`B`))\"\r\n assert len(graph) == 5 # for now len(graph) is 6\r\n assert len([n for n in graph if isinstance(n.op, DataFrameEval)]) == 1 # and 2 evals exist\r\n```\r\n\n", "before_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport weakref\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Dict, List, Tuple, Type\n\nfrom ...core import OperandType, ChunkType, EntityType, enter_mode\nfrom ...core.graph import EntityGraph\nfrom ...core.operand import Operand\n\n\nclass OptimizationRecordType(Enum):\n replace = 0\n new = 1\n delete = 2\n\n\n@dataclass\nclass OptimizationRecord:\n original_chunk: ChunkType = None\n new_chunk: ChunkType = None\n record_type: OptimizationRecordType = None\n\n\nclass OptimizationRecords:\n _records: List[OptimizationRecord]\n _original_chunk_to_records: Dict[ChunkType, 
OptimizationRecord]\n\n def __init__(self):\n self._records = list()\n self._original_chunk_to_records = dict()\n self._optimized_chunk_to_records = dict()\n\n def append_record(self, record: OptimizationRecord):\n self._records.append(record)\n if record.record_type in (\n OptimizationRecordType.replace,\n OptimizationRecordType.delete,\n ):\n self._original_chunk_to_records[record.original_chunk] = record\n if record.record_type in (\n OptimizationRecordType.new,\n OptimizationRecordType.replace,\n ):\n self._optimized_chunk_to_records[record.new_chunk] = record\n\n def get_optimization_result(self, original_chunk: ChunkType) -> ChunkType:\n chunk = original_chunk\n if chunk not in self._original_chunk_to_records:\n return\n while chunk in self._original_chunk_to_records:\n record = self._original_chunk_to_records[chunk]\n if record.record_type == OptimizationRecordType.replace:\n chunk = record.new_chunk\n else:\n assert record.record_type == OptimizationRecordType.delete\n return None\n return chunk\n\n def get_original_chunk(self, optimized_chunk: ChunkType) -> ChunkType:\n chunk = optimized_chunk\n if chunk not in self._optimized_chunk_to_records:\n return\n while chunk in self._optimized_chunk_to_records:\n record = self._optimized_chunk_to_records[chunk]\n if record.record_type == OptimizationRecordType.replace:\n chunk = record.original_chunk\n else:\n assert record.record_type == OptimizationRecordType.new\n return None\n return chunk\n\n\nclass OptimizationRule(ABC):\n _instances: Dict[\n Tuple[Type[\"OptimizationRule\"], EntityGraph, OptimizationRecords],\n \"OptimizationRule\",\n ] = dict()\n _preds_to_remove = weakref.WeakKeyDictionary()\n\n def __init__(\n self,\n graph: EntityGraph,\n records: OptimizationRecords,\n optimizer_cls: Type[\"Optimizer\"],\n ):\n self._graph = graph\n self._records = records\n self._optimizer_cls = optimizer_cls\n\n def __new__(\n cls,\n graph: EntityGraph,\n records: OptimizationRecords,\n optimizer_cls: Type[\"Optimizer\"],\n ):\n if (cls, graph, records) in cls._instances:\n return cls._instances[cls, graph, records]\n inst = cls._instances[cls, graph, records] = object.__new__(cls)\n return inst\n\n @abstractmethod\n def match(self, op: OperandType) -> bool:\n \"\"\"\n If this operand matches this rule.\n\n Parameters\n ----------\n op : OperandType\n Operand.\n\n Returns\n -------\n matched : bool\n Matched rule or not.\n \"\"\"\n\n @abstractmethod\n def apply(self, op: OperandType):\n \"\"\"\n Apply rule to an operand.\n\n Parameters\n ----------\n op : OperandType\n Operand\n \"\"\"\n\n def _replace_node(self, original_node: EntityType, new_node: EntityType):\n predecessors = self._graph.predecessors(original_node)\n successors = self._graph.successors(original_node)\n self._graph.remove_node(original_node)\n self._graph.add_node(new_node)\n for pred in predecessors:\n self._graph.add_edge(pred, new_node)\n for succ in successors:\n self._graph.add_edge(new_node, succ)\n\n @classmethod\n def _add_collapsable_predecessor(cls, node: EntityType, predecessor: EntityType):\n if predecessor not in cls._preds_to_remove:\n cls._preds_to_remove[predecessor] = {node}\n else:\n cls._preds_to_remove[predecessor].add(node)\n\n def _remove_collapsable_predecessors(self, node: EntityType):\n node = self._records.get_optimization_result(node) or node\n preds_opt_to_remove = []\n for pred in self._graph.predecessors(node):\n pred_original = self._records.get_original_chunk(pred) or pred\n pred_opt = self._records.get_optimization_result(pred) or pred\n if 
pred_opt in self._graph.results or pred_original in self._graph.results:\n continue\n affect_succ = self._preds_to_remove.get(pred_original) or []\n affect_succ_opt = [\n self._records.get_optimization_result(s) or s for s in affect_succ\n ]\n if all(s in affect_succ_opt for s in self._graph.successors(pred)):\n preds_opt_to_remove.append((pred_original, pred_opt))\n\n for pred_original, pred_opt in preds_opt_to_remove:\n self._graph.remove_node(pred_opt)\n self._records.append_record(\n OptimizationRecord(pred_original, None, OptimizationRecordType.delete)\n )\n\n\nclass Optimizer(ABC):\n _rules: List[Type[OptimizationRule]]\n _op_to_rules: Dict[Type[OperandType], List[Type[OptimizationRule]]]\n\n @classmethod\n def register_rule(\n cls, operand_types: List[Type[OperandType]], rule: Type[OptimizationRule]\n ):\n if not hasattr(cls, \"_rules\"):\n cls._rules = []\n cls._rules.append(rule)\n\n if not hasattr(cls, \"_op_to_rules\"):\n cls._op_to_rules = defaultdict(list)\n for operand_type in operand_types:\n cls._op_to_rules[operand_type].append(rule)\n\n @classmethod\n def get_rule_types(\n cls, operand_type: Type[OperandType]\n ) -> List[Type[OptimizationRule]]:\n rule_types = cls._op_to_rules.get(operand_type, None)\n if rule_types is None:\n for op_cls in operand_type.__mro__:\n if op_cls is Operand:\n break\n rule_types = cls._op_to_rules.get(op_cls)\n if rule_types is not None:\n break\n cls._op_to_rules[operand_type] = rule_types or []\n return rule_types\n\n @classmethod\n def _replace_inputs(cls, graph: EntityGraph, records: OptimizationRecords):\n for node in graph:\n for succ in graph.successors(node):\n input_optimized = False\n new_inputs = []\n for inp in succ.inputs:\n optimized = records.get_optimization_result(inp)\n if optimized is None:\n optimized = inp\n if optimized is not inp:\n input_optimized = True\n new_inputs.append(optimized)\n if input_optimized:\n succ.inputs = new_inputs\n\n @classmethod\n @enter_mode(build=True)\n def optimize(cls, graph: EntityGraph) -> OptimizationRecords:\n \"\"\"\n Optimize a graph.\n\n Parameters\n ----------\n graph : EntityGraph\n Tileable or chunk graph.\n\n Returns\n -------\n optimization_records : OptimizationRecords\n Optimization records.\n \"\"\"\n records = OptimizationRecords()\n optimized = False\n for rule_type in cls._rules:\n visited = set()\n for entity in list(graph.topological_iter()):\n op = entity.op\n if op in visited:\n continue\n visited.add(op)\n\n rule_types = cls.get_rule_types(type(op)) or []\n if rule_type not in rule_types:\n continue\n\n rule = rule_type(graph, records, cls)\n if entity not in graph: # pragma: no cover\n # maybe removed during optimization\n continue\n if rule.match(op):\n optimized = True\n rule.apply(op)\n if optimized:\n cls._replace_inputs(graph, records)\n return records\n", "path": "mars/optimization/logical/core.py"}]} | 3,501 | 228 |
gh_patches_debug_17694 | rasdani/github-patches | git_diff | learningequality__kolibri-2484 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Starting up Kolibri Development Server
### Observed behavior
Trying to run the Kolibri Development Server from Ubuntu Bash on Windows fails: it raises a ValueError for an incorrect timezone (`local`).
### Expected behavior
The Kolibri server should start up on the local machine.
### Errors and logs
Relevant errors and tracebacks from:
* virtual environment command line:
```
Traceback (most recent call last):
File "/home/alan/.venvs/kolibri/bin/kolibri", line 11, in <module>
load_entry_point('kolibri', 'console_scripts', 'kolibri')()
File "/home/alan/Kolibri1/kolibri/kolibri/utils/cli.py", line 607, in main
initialize(debug=debug)
File "/home/alan/Kolibri1/kolibri/kolibri/utils/cli.py", line 176, in initialize
django.setup()
File "/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/__init__.py", line 17, in setup
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
File "/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py", line 55, in __getattr__
self._setup(name)
File "/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py", line 43, in _setup
self._wrapped = Settings(settings_module)
File "/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py", line 138, in __init__
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
ValueError: Incorrect timezone setting: local
```
### Steps to reproduce
Run the following command to start up the Kolibri Development Server:
`kolibri --debug manage devserver --webpack`
### Context
* Kolibri version: Develop Branch
* Operating system: Ubuntu Bash for Windows
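For reference, the fix that was eventually applied (see the diff at the end of this entry) guards both ways `tzlocal` can misbehave here: raising `pytz.UnknownTimeZoneError`, and returning the pseudo-zone `"local"` that Django rejects (tzlocal 1.4, https://github.com/regebro/tzlocal/issues/44). A condensed sketch of that approach:
```python
import pytz
from tzlocal import get_localzone

try:
    TIME_ZONE = get_localzone().zone
except pytz.UnknownTimeZoneError:
    # No timezone could be detected; fall back instead of failing at startup.
    TIME_ZONE = pytz.utc.zone

# tzlocal 1.4 may report the literal string "local", which Django rejects.
if TIME_ZONE == "local":
    TIME_ZONE = pytz.utc.zone
```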
</issue>
<code>
[start of kolibri/deployment/default/settings/base.py]
1 # -*- coding: utf-8 -*-
2 """
3 Django settings for kolibri project.
4
5 For more information on this file, see
6 https://docs.djangoproject.com/en/1.9/topics/settings/
7
8 For the full list of settings and their values, see
9 https://docs.djangoproject.com/en/1.9/ref/settings/
10 """
11 from __future__ import absolute_import, print_function, unicode_literals
12
13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
14 import os
15
16 # import kolibri, so we can get the path to the module.
17 import kolibri
18 # we load other utilities related to i18n
19 # This is essential! We load the kolibri conf INSIDE the Django conf
20 from kolibri.utils import conf, i18n
21 from tzlocal import get_localzone
22
23 KOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)
24
25 BASE_DIR = os.path.abspath(os.path.dirname(__name__))
26
27 KOLIBRI_HOME = os.environ['KOLIBRI_HOME']
28
29 KOLIBRI_CORE_JS_NAME = 'kolibriGlobal'
30
31 LOCALE_PATHS = [
32 os.path.join(KOLIBRI_MODULE_PATH, "locale"),
33 ]
34
35 # Quick-start development settings - unsuitable for production
36 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
37
38 # SECURITY WARNING: keep the secret key used in production secret!
39 SECRET_KEY = 'f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27'
40
41 # SECURITY WARNING: don't run with debug turned on in production!
42 DEBUG = False
43
44 ALLOWED_HOSTS = ['*']
45
46 # Application definition
47
48 INSTALLED_APPS = [
49 'kolibri.core',
50 'django.contrib.admin',
51 'django.contrib.auth',
52 'django.contrib.contenttypes',
53 'django.contrib.sessions',
54 'django.contrib.messages',
55 'django.contrib.staticfiles',
56 'kolibri.auth.apps.KolibriAuthConfig',
57 'kolibri.content',
58 'kolibri.logger',
59 'kolibri.tasks.apps.KolibriTasksConfig',
60 'kolibri.core.webpack',
61 'kolibri.core.exams',
62 'kolibri.core.device',
63 'kolibri.core.discovery',
64 'rest_framework',
65 'django_js_reverse',
66 'jsonfield',
67 'morango',
68 ] + conf.config['INSTALLED_APPS']
69
70 # Add in the external plugins' locale paths. Our frontend messages depends
71 # specifically on the value of LOCALE_PATHS to find its catalog files.
72 LOCALE_PATHS += [
73 i18n.get_installed_app_locale_path(app) for app in INSTALLED_APPS
74 if i18n.is_external_plugin(app)
75 ]
76
77 MIDDLEWARE_CLASSES = (
78 'django.contrib.sessions.middleware.SessionMiddleware',
79 'kolibri.core.device.middleware.KolibriLocaleMiddleware',
80 'django.middleware.common.CommonMiddleware',
81 'django.middleware.csrf.CsrfViewMiddleware',
82 'kolibri.plugins.setup_wizard.middleware.SetupWizardMiddleware',
83 'kolibri.auth.middleware.CustomAuthenticationMiddleware',
84 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
85 'django.contrib.messages.middleware.MessageMiddleware',
86 'django.middleware.clickjacking.XFrameOptionsMiddleware',
87 'django.middleware.security.SecurityMiddleware',
88 )
89
90 QUEUE_JOB_STORAGE_PATH = os.path.join(KOLIBRI_HOME, "job_storage.sqlite3")
91
92 ROOT_URLCONF = 'kolibri.deployment.default.urls'
93
94 TEMPLATES = [
95 {
96 'BACKEND': 'django.template.backends.django.DjangoTemplates',
97 'DIRS': [],
98 'APP_DIRS': True,
99 'OPTIONS': {
100 'context_processors': [
101 'django.template.context_processors.debug',
102 'django.template.context_processors.request',
103 'django.contrib.auth.context_processors.auth',
104 'django.contrib.messages.context_processors.messages',
105 'kolibri.core.context_processors.custom_context_processor.return_session',
106 ],
107 },
108 },
109 ]
110
111 WSGI_APPLICATION = 'kolibri.deployment.default.wsgi.application'
112
113
114 # Database
115 # https://docs.djangoproject.com/en/1.9/ref/settings/#databases
116
117 DATABASES = {
118 'default': {
119 'ENGINE': 'django.db.backends.sqlite3',
120 'NAME': os.path.join(KOLIBRI_HOME, 'db.sqlite3'),
121 'OPTIONS': {
122 'timeout': 100,
123 }
124 },
125 }
126
127 # Content directories and URLs for channel metadata and content files
128
129 # Directory and URL for storing content databases for channel data
130 CONTENT_DATABASE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'databases')
131 if not os.path.exists(CONTENT_DATABASE_DIR):
132 os.makedirs(CONTENT_DATABASE_DIR)
133
134 # Directory and URL for storing de-duped content files for all channels
135 CONTENT_STORAGE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'storage')
136 if not os.path.exists(CONTENT_STORAGE_DIR):
137 os.makedirs(CONTENT_STORAGE_DIR)
138
139 # Base default URL for downloading content from an online server
140 CENTRAL_CONTENT_DOWNLOAD_BASE_URL = "http://studio.learningequality.org"
141
142 # Internationalization
143 # https://docs.djangoproject.com/en/1.9/topics/i18n/
144
145 LANGUAGES = [
146 ('en', 'English'),
147 ('sw-tz', 'Kiswahili'),
148 ('es-es', 'Español'),
149 ('es-mx', 'Español (México)'),
150 ('fr-fr', 'Français'),
151 ('pt-pt', 'Português'),
152 ('hi-in', 'हिंदी'),
153 ('ar-eg', 'العَرَبِيَّة')
154 ]
155
156 LANGUAGE_CODE = conf.config.get("LANGUAGE_CODE") or "en"
157
158 TIME_ZONE = get_localzone().zone
159
160 USE_I18N = True
161
162 USE_L10N = True
163
164 USE_TZ = True
165
166 # Static files (CSS, JavaScript, Images)
167 # https://docs.djangoproject.com/en/1.9/howto/static-files/
168
169 STATIC_URL = '/static/'
170 STATIC_ROOT = os.path.join(KOLIBRI_HOME, "static")
171
172 # https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-LOGGING
173 # https://docs.djangoproject.com/en/1.9/topics/logging/
174
175 LOGGING = {
176 'version': 1,
177 'disable_existing_loggers': False,
178 'formatters': {
179 'verbose': {
180 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
181 },
182 'simple': {
183 'format': '%(levelname)s %(message)s'
184 },
185 'simple_date': {
186 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
187 },
188 'color': {
189 '()': 'colorlog.ColoredFormatter',
190 'format': '%(log_color)s%(levelname)-8s %(message)s',
191 'log_colors': {
192 'DEBUG': 'bold_black',
193 'INFO': 'white',
194 'WARNING': 'yellow',
195 'ERROR': 'red',
196 'CRITICAL': 'bold_red',
197 },
198 }
199 },
200 'filters': {
201 'require_debug_true': {
202 '()': 'django.utils.log.RequireDebugTrue',
203 },
204 'require_debug_false': {
205 '()': 'django.utils.log.RequireDebugFalse',
206 },
207 },
208 'handlers': {
209 'console': {
210 'level': 'INFO',
211 'class': 'logging.StreamHandler',
212 'formatter': 'color'
213 },
214 'mail_admins': {
215 'level': 'ERROR',
216 'class': 'django.utils.log.AdminEmailHandler',
217 'filters': ['require_debug_false'],
218 },
219 'request_debug': {
220 'level': 'ERROR',
221 'class': 'logging.StreamHandler',
222 'formatter': 'color',
223 'filters': ['require_debug_true'],
224 },
225 'file_debug': {
226 'level': 'DEBUG',
227 'filters': ['require_debug_true'],
228 'class': 'logging.FileHandler',
229 'filename': os.path.join(KOLIBRI_HOME, 'debug.log'),
230 'formatter': 'simple_date',
231 },
232 'file': {
233 'level': 'INFO',
234 'filters': [],
235 'class': 'logging.FileHandler',
236 'filename': os.path.join(KOLIBRI_HOME, 'kolibri.log'),
237 'formatter': 'simple_date',
238 },
239 },
240 'loggers': {
241 'django': {
242 'handlers': ['console', 'file'],
243 'propagate': True,
244 },
245 'django.request': {
246 'handlers': ['mail_admins', 'file', 'request_debug'],
247 'level': 'ERROR',
248 'propagate': False,
249 },
250 'kolibri': {
251 'handlers': ['console', 'mail_admins', 'file', 'file_debug'],
252 'level': 'INFO',
253 }
254 }
255 }
256
257
258 # Customizing Django auth system
259 # https://docs.djangoproject.com/en/1.9/topics/auth/customizing/
260
261 AUTH_USER_MODEL = 'kolibriauth.FacilityUser'
262
263 AUTHENTICATION_BACKENDS = ['kolibri.auth.backends.FacilityUserBackend']
264
265
266 # Django REST Framework
267 # http://www.django-rest-framework.org/api-guide/settings/
268
269 REST_FRAMEWORK = {
270 "UNAUTHENTICATED_USER": "kolibri.auth.models.KolibriAnonymousUser",
271 'DEFAULT_RENDERER_CLASSES': (
272 'rest_framework.renderers.JSONRenderer',
273 'rest_framework.renderers.BrowsableAPIRenderer',
274 'rest_framework_csv.renderers.CSVRenderer',
275 ),
276 }
277
278 # System warnings to disable
279 # see https://docs.djangoproject.com/en/1.9/ref/settings/#silenced-system-checks
280 SILENCED_SYSTEM_CHECKS = ["auth.W004"]
281
282 # Configuration for Django JS Reverse
283 # https://github.com/ierror/django-js-reverse#options
284
285 JS_REVERSE_JS_VAR_NAME = 'kolibriUrls'
286
287 JS_REVERSE_EXCLUDE_NAMESPACES = ['admin', ]
288
289 ENABLE_DATA_BOOTSTRAPPING = True
290
[end of kolibri/deployment/default/settings/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/deployment/default/settings/base.py b/kolibri/deployment/default/settings/base.py
--- a/kolibri/deployment/default/settings/base.py
+++ b/kolibri/deployment/default/settings/base.py
@@ -13,6 +13,8 @@
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
+import pytz
+
# import kolibri, so we can get the path to the module.
import kolibri
# we load other utilities related to i18n
@@ -155,7 +157,18 @@
LANGUAGE_CODE = conf.config.get("LANGUAGE_CODE") or "en"
-TIME_ZONE = get_localzone().zone
+try:
+ TIME_ZONE = get_localzone().zone
+except pytz.UnknownTimeZoneError:
+ # Do not fail at this point because a timezone was not
+ # detected.
+ TIME_ZONE = pytz.utc.zone
+
+# Fixes https://github.com/regebro/tzlocal/issues/44
+# tzlocal 1.4 returns 'local' if unable to detect the timezone,
+# and this TZ id is invalid
+if TIME_ZONE == "local":
+ TIME_ZONE = pytz.utc.zone
USE_I18N = True
| {"golden_diff": "diff --git a/kolibri/deployment/default/settings/base.py b/kolibri/deployment/default/settings/base.py\n--- a/kolibri/deployment/default/settings/base.py\n+++ b/kolibri/deployment/default/settings/base.py\n@@ -13,6 +13,8 @@\n # Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n import os\n \n+import pytz\n+\n # import kolibri, so we can get the path to the module.\n import kolibri\n # we load other utilities related to i18n\n@@ -155,7 +157,18 @@\n \n LANGUAGE_CODE = conf.config.get(\"LANGUAGE_CODE\") or \"en\"\n \n-TIME_ZONE = get_localzone().zone\n+try:\n+ TIME_ZONE = get_localzone().zone\n+except pytz.UnknownTimeZoneError:\n+ # Do not fail at this point because a timezone was not\n+ # detected.\n+ TIME_ZONE = pytz.utc.zone\n+\n+# Fixes https://github.com/regebro/tzlocal/issues/44\n+# tzlocal 1.4 returns 'local' if unable to detect the timezone,\n+# and this TZ id is invalid\n+if TIME_ZONE == \"local\":\n+ TIME_ZONE = pytz.utc.zone\n \n USE_I18N = True\n", "issue": "Starting up Kolibri Development Server\n### Observed behavior\r\n\r\nTrying to run the Kolobri Development Server from Ubuntu Bash on Windows, but it raises a ValueError for incorrect timezone (local).\r\n\r\n### Expected behavior\r\n\r\nShould start up the Kolibri server on local machine.\r\n\r\n### Errors and logs\r\n\r\nRelevant errors and tracebacks from:\r\n\r\n*virtual environment command line:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/alan/.venvs/kolibri/bin/kolibri\", line 11, in <module>\r\n load_entry_point('kolibri', 'console_scripts', 'kolibri')()\r\n File \"/home/alan/Kolibri1/kolibri/kolibri/utils/cli.py\", line 607, in main\r\n initialize(debug=debug)\r\n File \"/home/alan/Kolibri1/kolibri/kolibri/utils/cli.py\", line 176, in initialize\r\n django.setup()\r\n File \"/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/__init__.py\", line 17, in setup\r\n configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)\r\n File \"/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py\", line 55, in __getattr__\r\n self._setup(name)\r\n File \"/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py\", line 43, in _setup\r\n self._wrapped = Settings(settings_module)\r\n File \"/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py\", line 138, in __init__\r\n raise ValueError(\"Incorrect timezone setting: %s\" % self.TIME_ZONE)\r\nValueError: Incorrect timezone setting: local\r\n\r\n```\r\n\r\n### Steps to reproduce\r\n\r\nRun command to start up Kolibri Development server. \r\n\r\nkolibri --debug manage devserver --webpack\r\n\r\n### Context\r\n\r\n* Kolibri version: Develop Branch\r\n* Operating system: Ubuntu Bash for Windows\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for kolibri project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\n# import kolibri, so we can get the path to the module.\nimport kolibri\n# we load other utilities related to i18n\n# This is essential! 
We load the kolibri conf INSIDE the Django conf\nfrom kolibri.utils import conf, i18n\nfrom tzlocal import get_localzone\n\nKOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)\n\nBASE_DIR = os.path.abspath(os.path.dirname(__name__))\n\nKOLIBRI_HOME = os.environ['KOLIBRI_HOME']\n\nKOLIBRI_CORE_JS_NAME = 'kolibriGlobal'\n\nLOCALE_PATHS = [\n os.path.join(KOLIBRI_MODULE_PATH, \"locale\"),\n]\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\n# Application definition\n\nINSTALLED_APPS = [\n 'kolibri.core',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'kolibri.auth.apps.KolibriAuthConfig',\n 'kolibri.content',\n 'kolibri.logger',\n 'kolibri.tasks.apps.KolibriTasksConfig',\n 'kolibri.core.webpack',\n 'kolibri.core.exams',\n 'kolibri.core.device',\n 'kolibri.core.discovery',\n 'rest_framework',\n 'django_js_reverse',\n 'jsonfield',\n 'morango',\n] + conf.config['INSTALLED_APPS']\n\n# Add in the external plugins' locale paths. Our frontend messages depends\n# specifically on the value of LOCALE_PATHS to find its catalog files.\nLOCALE_PATHS += [\n i18n.get_installed_app_locale_path(app) for app in INSTALLED_APPS\n if i18n.is_external_plugin(app)\n]\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'kolibri.core.device.middleware.KolibriLocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'kolibri.plugins.setup_wizard.middleware.SetupWizardMiddleware',\n 'kolibri.auth.middleware.CustomAuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nQUEUE_JOB_STORAGE_PATH = os.path.join(KOLIBRI_HOME, \"job_storage.sqlite3\")\n\nROOT_URLCONF = 'kolibri.deployment.default.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'kolibri.core.context_processors.custom_context_processor.return_session',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'kolibri.deployment.default.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(KOLIBRI_HOME, 'db.sqlite3'),\n 'OPTIONS': {\n 'timeout': 100,\n }\n },\n}\n\n# Content directories and URLs for channel metadata and content files\n\n# Directory and URL for storing content databases for channel data\nCONTENT_DATABASE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'databases')\nif not os.path.exists(CONTENT_DATABASE_DIR):\n os.makedirs(CONTENT_DATABASE_DIR)\n\n# Directory and URL for storing de-duped content files for all channels\nCONTENT_STORAGE_DIR = 
os.path.join(KOLIBRI_HOME, 'content', 'storage')\nif not os.path.exists(CONTENT_STORAGE_DIR):\n os.makedirs(CONTENT_STORAGE_DIR)\n\n# Base default URL for downloading content from an online server\nCENTRAL_CONTENT_DOWNLOAD_BASE_URL = \"http://studio.learningequality.org\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGES = [\n ('en', 'English'),\n ('sw-tz', 'Kiswahili'),\n ('es-es', 'Espa\u00f1ol'),\n ('es-mx', 'Espa\u00f1ol (M\u00e9xico)'),\n ('fr-fr', 'Fran\u00e7ais'),\n ('pt-pt', 'Portugu\u00eas'),\n ('hi-in', '\u0939\u093f\u0902\u0926\u0940'),\n ('ar-eg', '\u0627\u0644\u0639\u064e\u0631\u064e\u0628\u0650\u064a\u064e\u0651\u0629\u200e\u200e')\n]\n\nLANGUAGE_CODE = conf.config.get(\"LANGUAGE_CODE\") or \"en\"\n\nTIME_ZONE = get_localzone().zone\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(KOLIBRI_HOME, \"static\")\n\n# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-LOGGING\n# https://docs.djangoproject.com/en/1.9/topics/logging/\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n 'simple_date': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'\n },\n 'color': {\n '()': 'colorlog.ColoredFormatter',\n 'format': '%(log_color)s%(levelname)-8s %(message)s',\n 'log_colors': {\n 'DEBUG': 'bold_black',\n 'INFO': 'white',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bold_red',\n },\n }\n },\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false'],\n },\n 'request_debug': {\n 'level': 'ERROR',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color',\n 'filters': ['require_debug_true'],\n },\n 'file_debug': {\n 'level': 'DEBUG',\n 'filters': ['require_debug_true'],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'debug.log'),\n 'formatter': 'simple_date',\n },\n 'file': {\n 'level': 'INFO',\n 'filters': [],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'kolibri.log'),\n 'formatter': 'simple_date',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'file'],\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['mail_admins', 'file', 'request_debug'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'kolibri': {\n 'handlers': ['console', 'mail_admins', 'file', 'file_debug'],\n 'level': 'INFO',\n }\n }\n}\n\n\n# Customizing Django auth system\n# https://docs.djangoproject.com/en/1.9/topics/auth/customizing/\n\nAUTH_USER_MODEL = 'kolibriauth.FacilityUser'\n\nAUTHENTICATION_BACKENDS = ['kolibri.auth.backends.FacilityUserBackend']\n\n\n# Django REST Framework\n# http://www.django-rest-framework.org/api-guide/settings/\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.auth.models.KolibriAnonymousUser\",\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 
'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework_csv.renderers.CSVRenderer',\n ),\n}\n\n# System warnings to disable\n# see https://docs.djangoproject.com/en/1.9/ref/settings/#silenced-system-checks\nSILENCED_SYSTEM_CHECKS = [\"auth.W004\"]\n\n# Configuration for Django JS Reverse\n# https://github.com/ierror/django-js-reverse#options\n\nJS_REVERSE_JS_VAR_NAME = 'kolibriUrls'\n\nJS_REVERSE_EXCLUDE_NAMESPACES = ['admin', ]\n\nENABLE_DATA_BOOTSTRAPPING = True\n", "path": "kolibri/deployment/default/settings/base.py"}]} | 3,896 | 279 |
gh_patches_debug_34388 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-1908 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Save checkpoint for the last epoch
How can I save the checkpoint only for the last epoch?
In the docs:
```
if save_top_k == k, the best k models according to the quantity monitored will be saved. if save_top_k == 0, no models are saved. if save_top_k == -1, all models are saved. Please note that the monitors are checked every period epochs. if save_top_k >= 2 and the callback is called multiple times inside an epoch, the name of the saved file will be appended with a version count starting with v0.
```
* `k = 0` does not save any.
* `k > 1` saves only the best `k` models
* `k = -1` saves all of them
Currently, I am using k = -1, but it is space consuming.
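For illustration, here is a rough usage sketch of the kind of flag being requested (the `save_last` name and the `last.ckpt` behaviour follow the patch further below and should be treated as an assumption here, not an existing API):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

# Keep the single best model as usual, and additionally always write a
# checkpoint for the most recent epoch, instead of saving every epoch (k = -1).
checkpoint_callback = ModelCheckpoint(
    filepath='my/path/{epoch}',
    save_top_k=1,
    save_last=True,  # hypothetical flag requested by this issue
)
trainer = Trainer(checkpoint_callback=checkpoint_callback)
```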
</issue>
<code>
[start of pytorch_lightning/callbacks/model_checkpoint.py]
1 """
2 Model Checkpointing
3 ===================
4
5 Automatically save model checkpoints during training.
6
7 """
8
9 import os
10 import re
11
12 import numpy as np
13 from typing import Optional
14
15 import torch
16 from pytorch_lightning import _logger as log
17 from pytorch_lightning.callbacks.base import Callback
18 from pytorch_lightning.utilities import rank_zero_warn, rank_zero_only
19
20
21 class ModelCheckpoint(Callback):
22 r"""
23 Save the model after every epoch.
24
25 Args:
26 filepath: path to save the model file.
27 Can contain named formatting options to be auto-filled.
28
29 Example::
30
31 # custom path
32 # saves a file like: my/path/epoch_0.ckpt
33 >>> checkpoint_callback = ModelCheckpoint('my/path/')
34
35 # save any arbitrary metrics like `val_loss`, etc. in name
36 # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt
37 >>> checkpoint_callback = ModelCheckpoint(
38 ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'
39 ... )
40
41 Can also be set to `None`, then it will be set to default location
42 during trainer construction.
43
44 monitor: quantity to monitor.
45 verbose: verbosity mode. Default: ``False``.
46 save_top_k: if `save_top_k == k`,
47 the best k models according to
48 the quantity monitored will be saved.
49 if ``save_top_k == 0``, no models are saved.
50 if ``save_top_k == -1``, all models are saved.
51 Please note that the monitors are checked every `period` epochs.
52 if ``save_top_k >= 2`` and the callback is called multiple
53 times inside an epoch, the name of the saved file will be
54 appended with a version count starting with `v0`.
55 mode: one of {auto, min, max}.
56 If ``save_top_k != 0``, the decision
57 to overwrite the current save file is made
58 based on either the maximization or the
59 minimization of the monitored quantity. For `val_acc`,
60 this should be `max`, for `val_loss` this should
61 be `min`, etc. In `auto` mode, the direction is
62 automatically inferred from the name of the monitored quantity.
63 save_weights_only: if ``True``, then only the model's weights will be
64 saved (``model.save_weights(filepath)``), else the full model
65 is saved (``model.save(filepath)``).
66 period: Interval (number of epochs) between checkpoints.
67
68 Example::
69
70 >>> from pytorch_lightning import Trainer
71 >>> from pytorch_lightning.callbacks import ModelCheckpoint
72
73 # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min
74 >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')
75 >>> trainer = Trainer(checkpoint_callback=checkpoint_callback)
76
77 # save epoch and val_loss in name
78 # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt
79 >>> checkpoint_callback = ModelCheckpoint(
80 ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'
81 ... )
82
83 """
84
85 def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,
86 save_top_k: int = 1, save_weights_only: bool = False,
87 mode: str = 'auto', period: int = 1, prefix: str = ''):
88 super().__init__()
89 if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:
90 rank_zero_warn(
91 f"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0."
92 "All files in this directory will be deleted when a checkpoint is saved!"
93 )
94 self._rank = 0
95
96 self.monitor = monitor
97 self.verbose = verbose
98 if filepath is None: # will be determined by trainer at runtime
99 self.dirpath, self.filename = None, None
100 else:
101 if os.path.isdir(filepath):
102 self.dirpath, self.filename = filepath, '{epoch}'
103 else:
104 self.dirpath, self.filename = os.path.split(filepath)
105 os.makedirs(self.dirpath, exist_ok=True)
106 self.save_top_k = save_top_k
107 self.save_weights_only = save_weights_only
108 self.period = period
109 self.epoch_last_check = None
110 self.prefix = prefix
111 self.best_k_models = {}
112 # {filename: monitor}
113 self.kth_best_model = ''
114 self.best = 0
115 self.save_function = None
116
117 torch_inf = torch.tensor(np.Inf)
118 mode_dict = {
119 'min': (torch_inf, 'min'),
120 'max': (-torch_inf, 'max'),
121 'auto': (-torch_inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')
122 else (torch_inf, 'min'),
123 }
124
125 if mode not in mode_dict:
126 rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, '
127 f'fallback to auto mode.', RuntimeWarning)
128 mode = 'auto'
129
130 self.kth_value, self.mode = mode_dict[mode]
131
132 def _del_model(self, filepath):
133 if os.path.isfile(filepath):
134 os.remove(filepath)
135
136 def _save_model(self, filepath):
137 # make paths
138 os.makedirs(os.path.dirname(filepath), exist_ok=True)
139
140 # delegate the saving to the model
141 if self.save_function is not None:
142 self.save_function(filepath, self.save_weights_only)
143 else:
144 raise ValueError(".save_function() not set")
145
146 def check_monitor_top_k(self, current):
147 less_than_k_models = len(self.best_k_models) < self.save_top_k
148 if less_than_k_models:
149 return True
150
151 if not isinstance(current, torch.Tensor):
152 rank_zero_warn(
153 f'{current} is supposed to be a torch.Tensor. Saving checkpoint may not work correctly. '
154 f'HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning
155 )
156 current = torch.tensor(current)
157
158 monitor_op = {
159 "min": torch.lt,
160 "max": torch.gt,
161 }[self.mode]
162
163 return monitor_op(current, self.best_k_models[self.kth_best_model])
164
165 def format_checkpoint_name(self, epoch, metrics, ver=None):
166 """Generate a filename according to the defined template.
167
168 Example::
169
170 >>> tmpdir = os.path.dirname(__file__)
171 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))
172 >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))
173 'epoch=0.ckpt'
174 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))
175 >>> os.path.basename(ckpt.format_checkpoint_name(5, {}))
176 'epoch=005.ckpt'
177 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))
178 >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))
179 'epoch=2-val_loss=0.12.ckpt'
180 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))
181 >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))
182 'missing=0.ckpt'
183 """
184 # check if user passed in keys to the string
185 groups = re.findall(r'(\{.*?)[:\}]', self.filename)
186
187 if len(groups) == 0:
188 # default name
189 filename = f'{self.prefix}_ckpt_epoch_{epoch}'
190 else:
191 metrics['epoch'] = epoch
192 filename = self.filename
193 for tmp in groups:
194 name = tmp[1:]
195 filename = filename.replace(tmp, name + '={' + name)
196 if name not in metrics:
197 metrics[name] = 0
198 filename = filename.format(**metrics)
199 str_ver = f'_v{ver}' if ver is not None else ''
200 filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')
201 return filepath
202
203 @rank_zero_only
204 def on_validation_end(self, trainer, pl_module):
205 # only run on main process
206 if trainer.proc_rank != 0:
207 return
208
209 metrics = trainer.callback_metrics
210 epoch = trainer.current_epoch
211 if self.save_top_k == 0:
212 # no models are saved
213 return
214 if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:
215 # skipping in this term
216 return
217
218 self.epoch_last_check = epoch
219
220 filepath = self.format_checkpoint_name(epoch, metrics)
221 version_cnt = 0
222 while os.path.isfile(filepath):
223 filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)
224 # this epoch called before
225 version_cnt += 1
226
227 if self.save_top_k != -1:
228 current = metrics.get(self.monitor)
229
230 if not isinstance(current, torch.Tensor):
231 rank_zero_warn(
232 f'The metric you returned {current} must be a Torch.Tensor instance, checkpoint not saved '
233 f'HINT: what is the value of {self.monitor} in validation_end()?', RuntimeWarning
234 )
235
236 if current is None:
237 rank_zero_warn(
238 f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning
239 )
240 elif self.check_monitor_top_k(current):
241 self._do_check_save(filepath, current, epoch)
242 elif self.verbose > 0:
243 log.info(f'\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')
244
245 else:
246 if self.verbose > 0:
247 log.info(f'\nEpoch {epoch:05d}: saving model to {filepath}')
248 self._save_model(filepath)
249
250 def _do_check_save(self, filepath, current, epoch):
251 # remove kth
252
253 del_list = []
254 if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:
255 delpath = self.kth_best_model
256 self.best_k_models.pop(self.kth_best_model)
257 del_list.append(delpath)
258
259 self.best_k_models[filepath] = current
260 if len(self.best_k_models) == self.save_top_k:
261 # monitor dict has reached k elements
262 _op = max if self.mode == 'min' else min
263 self.kth_best_model = _op(self.best_k_models,
264 key=self.best_k_models.get)
265 self.kth_value = self.best_k_models[self.kth_best_model]
266
267 _op = min if self.mode == 'min' else max
268 self.best = _op(self.best_k_models.values())
269
270 if self.verbose > 0:
271 log.info(
272 f'\nEpoch {epoch:05d}: {self.monitor} reached'
273 f' {current:0.5f} (best {self.best:0.5f}), saving model to'
274 f' {filepath} as top {self.save_top_k}')
275 self._save_model(filepath)
276
277 for cur_path in del_list:
278 if cur_path != filepath:
279 self._del_model(cur_path)
280
[end of pytorch_lightning/callbacks/model_checkpoint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py
--- a/pytorch_lightning/callbacks/model_checkpoint.py
+++ b/pytorch_lightning/callbacks/model_checkpoint.py
@@ -43,6 +43,7 @@
monitor: quantity to monitor.
verbose: verbosity mode. Default: ``False``.
+ save_last: always saves the model at the end of the epoch. Default: ``False``.
save_top_k: if `save_top_k == k`,
the best k models according to
the quantity monitored will be saved.
@@ -83,7 +84,7 @@
"""
def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,
- save_top_k: int = 1, save_weights_only: bool = False,
+ save_last: bool = False, save_top_k: int = 1, save_weights_only: bool = False,
mode: str = 'auto', period: int = 1, prefix: str = ''):
super().__init__()
if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:
@@ -103,6 +104,7 @@
else:
self.dirpath, self.filename = os.path.split(filepath)
os.makedirs(self.dirpath, exist_ok=True)
+ self.save_last = save_last
self.save_top_k = save_top_k
self.save_weights_only = save_weights_only
self.period = period
@@ -217,6 +219,10 @@
self.epoch_last_check = epoch
+ if self.save_last:
+ filepath = os.path.join(self.dirpath, self.prefix + 'last.ckpt')
+ self._save_model(filepath)
+
filepath = self.format_checkpoint_name(epoch, metrics)
version_cnt = 0
while os.path.isfile(filepath):
| {"golden_diff": "diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py\n--- a/pytorch_lightning/callbacks/model_checkpoint.py\n+++ b/pytorch_lightning/callbacks/model_checkpoint.py\n@@ -43,6 +43,7 @@\n \n monitor: quantity to monitor.\n verbose: verbosity mode. Default: ``False``.\n+ save_last: always saves the model at the end of the epoch. Default: ``False``.\n save_top_k: if `save_top_k == k`,\n the best k models according to\n the quantity monitored will be saved.\n@@ -83,7 +84,7 @@\n \"\"\"\n \n def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,\n- save_top_k: int = 1, save_weights_only: bool = False,\n+ save_last: bool = False, save_top_k: int = 1, save_weights_only: bool = False,\n mode: str = 'auto', period: int = 1, prefix: str = ''):\n super().__init__()\n if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:\n@@ -103,6 +104,7 @@\n else:\n self.dirpath, self.filename = os.path.split(filepath)\n os.makedirs(self.dirpath, exist_ok=True)\n+ self.save_last = save_last\n self.save_top_k = save_top_k\n self.save_weights_only = save_weights_only\n self.period = period\n@@ -217,6 +219,10 @@\n \n self.epoch_last_check = epoch\n \n+ if self.save_last:\n+ filepath = os.path.join(self.dirpath, self.prefix + 'last.ckpt')\n+ self._save_model(filepath)\n+\n filepath = self.format_checkpoint_name(epoch, metrics)\n version_cnt = 0\n while os.path.isfile(filepath):\n", "issue": "Save checkpoint for the last epoch\nHow to save the checkpoint only for the last epoch?\r\nIn the docs:\r\n\r\n```\r\nif save_top_k == k, the best k models according to the quantity monitored will be saved. if save_top_k == 0, no models are saved. if save_top_k == -1, all models are saved. Please note that the monitors are checked every period epochs. if save_top_k >= 2 and the callback is called multiple times inside an epoch, the name of the saved file will be appended with a version count starting with v0.\r\n```\r\n* `k = 0` does not save any.\r\n* `k > 1` saves only a few the best\r\n* `k = -1` saves all of them\r\n\r\nCurrently, I am using k = -1, but it space consuming.\n", "before_files": [{"content": "\"\"\"\nModel Checkpointing\n===================\n\nAutomatically save model checkpoints during training.\n\n\"\"\"\n\nimport os\nimport re\n\nimport numpy as np\nfrom typing import Optional\n\nimport torch\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks.base import Callback\nfrom pytorch_lightning.utilities import rank_zero_warn, rank_zero_only\n\n\nclass ModelCheckpoint(Callback):\n r\"\"\"\n Save the model after every epoch.\n\n Args:\n filepath: path to save the model file.\n Can contain named formatting options to be auto-filled.\n\n Example::\n\n # custom path\n # saves a file like: my/path/epoch_0.ckpt\n >>> checkpoint_callback = ModelCheckpoint('my/path/')\n\n # save any arbitrary metrics like `val_loss`, etc. in name\n # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'\n ... )\n\n Can also be set to `None`, then it will be set to default location\n during trainer construction.\n\n monitor: quantity to monitor.\n verbose: verbosity mode. 
Default: ``False``.\n save_top_k: if `save_top_k == k`,\n the best k models according to\n the quantity monitored will be saved.\n if ``save_top_k == 0``, no models are saved.\n if ``save_top_k == -1``, all models are saved.\n Please note that the monitors are checked every `period` epochs.\n if ``save_top_k >= 2`` and the callback is called multiple\n times inside an epoch, the name of the saved file will be\n appended with a version count starting with `v0`.\n mode: one of {auto, min, max}.\n If ``save_top_k != 0``, the decision\n to overwrite the current save file is made\n based on either the maximization or the\n minimization of the monitored quantity. For `val_acc`,\n this should be `max`, for `val_loss` this should\n be `min`, etc. In `auto` mode, the direction is\n automatically inferred from the name of the monitored quantity.\n save_weights_only: if ``True``, then only the model's weights will be\n saved (``model.save_weights(filepath)``), else the full model\n is saved (``model.save(filepath)``).\n period: Interval (number of epochs) between checkpoints.\n\n Example::\n\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.callbacks import ModelCheckpoint\n\n # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min\n >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')\n >>> trainer = Trainer(checkpoint_callback=checkpoint_callback)\n\n # save epoch and val_loss in name\n # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'\n ... )\n\n \"\"\"\n\n def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,\n save_top_k: int = 1, save_weights_only: bool = False,\n mode: str = 'auto', period: int = 1, prefix: str = ''):\n super().__init__()\n if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:\n rank_zero_warn(\n f\"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0.\"\n \"All files in this directory will be deleted when a checkpoint is saved!\"\n )\n self._rank = 0\n\n self.monitor = monitor\n self.verbose = verbose\n if filepath is None: # will be determined by trainer at runtime\n self.dirpath, self.filename = None, None\n else:\n if os.path.isdir(filepath):\n self.dirpath, self.filename = filepath, '{epoch}'\n else:\n self.dirpath, self.filename = os.path.split(filepath)\n os.makedirs(self.dirpath, exist_ok=True)\n self.save_top_k = save_top_k\n self.save_weights_only = save_weights_only\n self.period = period\n self.epoch_last_check = None\n self.prefix = prefix\n self.best_k_models = {}\n # {filename: monitor}\n self.kth_best_model = ''\n self.best = 0\n self.save_function = None\n\n torch_inf = torch.tensor(np.Inf)\n mode_dict = {\n 'min': (torch_inf, 'min'),\n 'max': (-torch_inf, 'max'),\n 'auto': (-torch_inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')\n else (torch_inf, 'min'),\n }\n\n if mode not in mode_dict:\n rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, '\n f'fallback to auto mode.', RuntimeWarning)\n mode = 'auto'\n\n self.kth_value, self.mode = mode_dict[mode]\n\n def _del_model(self, filepath):\n if os.path.isfile(filepath):\n os.remove(filepath)\n\n def _save_model(self, filepath):\n # make paths\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n # delegate the saving to the model\n if self.save_function is not None:\n 
self.save_function(filepath, self.save_weights_only)\n else:\n raise ValueError(\".save_function() not set\")\n\n def check_monitor_top_k(self, current):\n less_than_k_models = len(self.best_k_models) < self.save_top_k\n if less_than_k_models:\n return True\n\n if not isinstance(current, torch.Tensor):\n rank_zero_warn(\n f'{current} is supposed to be a torch.Tensor. Saving checkpoint may not work correctly. '\n f'HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning\n )\n current = torch.tensor(current)\n\n monitor_op = {\n \"min\": torch.lt,\n \"max\": torch.gt,\n }[self.mode]\n\n return monitor_op(current, self.best_k_models[self.kth_best_model])\n\n def format_checkpoint_name(self, epoch, metrics, ver=None):\n \"\"\"Generate a filename according to the defined template.\n\n Example::\n\n >>> tmpdir = os.path.dirname(__file__)\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'epoch=0.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(5, {}))\n 'epoch=005.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))\n 'epoch=2-val_loss=0.12.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'missing=0.ckpt'\n \"\"\"\n # check if user passed in keys to the string\n groups = re.findall(r'(\\{.*?)[:\\}]', self.filename)\n\n if len(groups) == 0:\n # default name\n filename = f'{self.prefix}_ckpt_epoch_{epoch}'\n else:\n metrics['epoch'] = epoch\n filename = self.filename\n for tmp in groups:\n name = tmp[1:]\n filename = filename.replace(tmp, name + '={' + name)\n if name not in metrics:\n metrics[name] = 0\n filename = filename.format(**metrics)\n str_ver = f'_v{ver}' if ver is not None else ''\n filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')\n return filepath\n\n @rank_zero_only\n def on_validation_end(self, trainer, pl_module):\n # only run on main process\n if trainer.proc_rank != 0:\n return\n\n metrics = trainer.callback_metrics\n epoch = trainer.current_epoch\n if self.save_top_k == 0:\n # no models are saved\n return\n if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:\n # skipping in this term\n return\n\n self.epoch_last_check = epoch\n\n filepath = self.format_checkpoint_name(epoch, metrics)\n version_cnt = 0\n while os.path.isfile(filepath):\n filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)\n # this epoch called before\n version_cnt += 1\n\n if self.save_top_k != -1:\n current = metrics.get(self.monitor)\n\n if not isinstance(current, torch.Tensor):\n rank_zero_warn(\n f'The metric you returned {current} must be a Torch.Tensor instance, checkpoint not saved '\n f'HINT: what is the value of {self.monitor} in validation_end()?', RuntimeWarning\n )\n\n if current is None:\n rank_zero_warn(\n f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning\n )\n elif self.check_monitor_top_k(current):\n self._do_check_save(filepath, current, epoch)\n elif self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')\n\n else:\n if self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: saving model to {filepath}')\n self._save_model(filepath)\n\n def _do_check_save(self, filepath, 
current, epoch):\n # remove kth\n\n del_list = []\n if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:\n delpath = self.kth_best_model\n self.best_k_models.pop(self.kth_best_model)\n del_list.append(delpath)\n\n self.best_k_models[filepath] = current\n if len(self.best_k_models) == self.save_top_k:\n # monitor dict has reached k elements\n _op = max if self.mode == 'min' else min\n self.kth_best_model = _op(self.best_k_models,\n key=self.best_k_models.get)\n self.kth_value = self.best_k_models[self.kth_best_model]\n\n _op = min if self.mode == 'min' else max\n self.best = _op(self.best_k_models.values())\n\n if self.verbose > 0:\n log.info(\n f'\\nEpoch {epoch:05d}: {self.monitor} reached'\n f' {current:0.5f} (best {self.best:0.5f}), saving model to'\n f' {filepath} as top {self.save_top_k}')\n self._save_model(filepath)\n\n for cur_path in del_list:\n if cur_path != filepath:\n self._del_model(cur_path)\n", "path": "pytorch_lightning/callbacks/model_checkpoint.py"}]} | 3,920 | 435 |
gh_patches_debug_34714 | rasdani/github-patches | git_diff | aws__aws-cli-206 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cloudformation describe-stack-events --output table omits ResourceStatusReason
aws-cli/0.13.2 Python/2.7.5 Darwin/12.4.1
This may be an RFE (request for enhancement). With a command like `aws cloudformation describe-stack-events --output table ...`, the table output does not include the `ResourceStatusReason` data. This is useful data when failures occur. For example, here is some sample output with an interesting `ResourceStatusReason`.
```
{
"StackId": "arn:aws:cloudformation:us-west-2:317324027142:stack/spot-3x-m1large/e1fa9ac0-f985-11e2-aa7f-507bfc8736d2",
"EventId": "LaunchConfig1-CREATE_FAILED-1375236694000",
"ResourceStatus": "CREATE_FAILED",
"ResourceType": "AWS::AutoScaling::LaunchConfiguration",
"Timestamp": "2013-07-31T02:11:34Z",
"ResourceStatusReason": "AMI cannot be described",
"StackName": "spot-3x-m1large",
"PhysicalResourceId": "spot-3x-m1large-LaunchConfig1-EFTX7ICLP050",
"LogicalResourceId": "LaunchConfig1"
}
```
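A minimal sketch of the likely cause, based on the `TableFormatter` code below (a simplification for illustration, not the actual implementation): the column headers are derived from the first list element only, so a key such as `ResourceStatusReason` that is missing from the first event never becomes a column.

```python
# Simplified illustration: headers are taken from the first element only.
events = [
    {"LogicalResourceId": "spot-3x-m1large", "ResourceStatus": "CREATE_IN_PROGRESS"},
    {"LogicalResourceId": "LaunchConfig1", "ResourceStatus": "CREATE_FAILED",
     "ResourceStatusReason": "AMI cannot be described"},
]
headers = sorted(events[0].keys())
rows = [[event.get(header, "") for header in headers] for event in events]
print(headers)  # ['LogicalResourceId', 'ResourceStatus'] -- the reason column is dropped
```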
</issue>
<code>
[start of awscli/formatter.py]
1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6
7 # http://aws.amazon.com/apache2.0/
8
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import logging
14 import sys
15 import json
16
17 import six
18
19 from awscli.table import MultiTable, Styler, ColorizedStyler
20
21
22 LOG = logging.getLogger(__name__)
23
24
25 class Formatter(object):
26 def __init__(self, args):
27 self._args = args
28
29 def _remove_request_id(self, response_data):
30 # We only want to display the ResponseMetadata (which includes
31 # the request id) if there is an error in the response.
32 # Since all errors have been unified under the Errors key,
33 # this should be a reasonable way to filter.
34 if 'Errors' not in response_data:
35 if 'ResponseMetadata' in response_data:
36 if 'RequestId' in response_data['ResponseMetadata']:
37 request_id = response_data['ResponseMetadata']['RequestId']
38 LOG.debug('RequestId: %s', request_id)
39 del response_data['ResponseMetadata']
40
41
42 class FullyBufferedFormatter(Formatter):
43 def __call__(self, operation, response, stream=None):
44 if stream is None:
45 # Retrieve stdout on invocation instead of at import time
46 # so that if anything wraps stdout we'll pick up those changes
47 # (specifically colorama on windows wraps stdout).
48 stream = sys.stdout
49 # I think the interfaces between non-paginated
50 # and paginated responses can still be cleaned up.
51 if operation.can_paginate and self._args.paginate:
52 response_data = response.build_full_result()
53 else:
54 response_data = response
55 try:
56 self._remove_request_id(response_data)
57 self._format_response(operation, response_data, stream)
58 finally:
59 # flush is needed to avoid the "close failed in file object
60 # destructor" in python2.x (see http://bugs.python.org/issue11380).
61 stream.flush()
62
63
64 class JSONFormatter(FullyBufferedFormatter):
65
66 def _format_response(self, operation, response, stream):
67 # For operations that have no response body (e.g. s3 put-object)
68 # the response will be an empty string. We don't want to print
69 # that out to the user but other "falsey" values like an empty
70 # dictionary should be printed.
71 if response != '':
72 json.dump(response, stream, indent=4)
73 stream.write('\n')
74
75
76 class TableFormatter(FullyBufferedFormatter):
77 """Pretty print a table from a given response.
78
79 The table formatter is able to take any generic response
80 and generate a pretty printed table. It does this without
81 using the output definition from the model.
82
83 """
84 def __init__(self, args, table=None):
85 super(TableFormatter, self).__init__(args)
86 if args.color == 'auto':
87 self.table = MultiTable(initial_section=False,
88 column_separator='|')
89 elif args.color == 'off':
90 styler = Styler()
91 self.table = MultiTable(initial_section=False,
92 column_separator='|', styler=styler)
93 elif args.color == 'on':
94 styler = ColorizedStyler()
95 self.table = MultiTable(initial_section=False,
96 column_separator='|', styler=styler)
97 else:
98 raise ValueError("Unknown color option: %s" % args.color)
99
100 def _format_response(self, operation, response, stream):
101 if self._build_table(operation.name, response):
102 try:
103 self.table.render(stream)
104 except IOError:
105 # If they're piping stdout to another process which exits before
106 # we're done writing all of our output, we'll get an error about a
107 # closed pipe which we can safely ignore.
108 pass
109
110 def _build_table(self, title, current, indent_level=0):
111 if not current:
112 return False
113 self.table.new_section(title, indent_level=indent_level)
114 if isinstance(current, list):
115 if isinstance(current[0], dict):
116 self._build_sub_table_from_list(current, indent_level, title)
117 else:
118 for item in current:
119 self.table.add_row([item])
120 if isinstance(current, dict):
121 # Render a single row section with keys as header
122 # and the row as the values, unless the value
123 # is a list.
124 self._build_sub_table_from_dict(current, indent_level)
125 return True
126
127 def _build_sub_table_from_dict(self, current, indent_level):
128 # Render a single row section with keys as header
129 # and the row as the values, unless the value
130 # is a list.
131 headers, more = self._group_scalar_keys(current)
132 if len(headers) == 1:
133 # Special casing if a dict has a single scalar key/value pair.
134 self.table.add_row([headers[0], current[headers[0]]])
135 elif headers:
136 self.table.add_row_header(headers)
137 self.table.add_row([current[k] for k in headers])
138 for remaining in more:
139 self._build_table(remaining, current[remaining],
140 indent_level=indent_level + 1)
141
142 def _build_sub_table_from_list(self, current, indent_level, title):
143 headers, more = self._group_scalar_keys(current[0])
144 self.table.add_row_header(headers)
145 first = True
146 for element in current:
147 if not first and more:
148 self.table.new_section(title,
149 indent_level=indent_level)
150 self.table.add_row_header(headers)
151 first = False
152 self.table.add_row([element[header] for header in headers])
153 for remaining in more:
154 # Some of the non scalar attributes may not necessarily
155 # be in every single element of the list, so we need to
156 # check this condition before recursing.
157 if remaining in element:
158 self._build_table(remaining, element[remaining],
159 indent_level=indent_level + 1)
160
161 def _scalar_type(self, element):
162 return not isinstance(element, (list, dict))
163
164 def _group_scalar_keys(self, current):
165 # Given a dict, separate the keys into those whose values are
166 # scalar, and those whose values aren't. Return two lists,
167 # one is the scalar value keys, the second is the remaining keys.
168 more = []
169 headers = []
170 for element in current:
171 if self._scalar_type(current[element]):
172 headers.append(element)
173 else:
174 more.append(element)
175 headers.sort()
176 more.sort()
177 return headers, more
178
179
180 class TextFormatter(FullyBufferedFormatter):
181
182 def _output(self, data, stream, label=None):
183 """
184 A very simple, very stupid text formatter that has no
185 knowledge of the output as defined in the JSON model.
186 """
187 if isinstance(data, dict):
188 scalars = []
189 non_scalars = []
190 for key, val in data.items():
191 if isinstance(val, dict):
192 non_scalars.append((key, val))
193 elif isinstance(val, list):
194 non_scalars.append((key, val))
195 elif not isinstance(val, six.string_types):
196 scalars.append(str(val))
197 else:
198 scalars.append(val)
199 if label:
200 scalars.insert(0, label.upper())
201 stream.write('\t'.join(scalars))
202 stream.write('\n')
203 for label, non_scalar in non_scalars:
204 self._output(non_scalar, stream, label)
205 elif isinstance(data, list):
206 for d in data:
207 self._output(d, stream)
208
209 def _format_response(self, operation, response, stream):
210 self._output(response, stream)
211
212
213 def get_formatter(format_type, args):
214 if format_type == 'json':
215 return JSONFormatter(args)
216 elif format_type == 'text':
217 return TextFormatter(args)
218 elif format_type == 'table':
219 return TableFormatter(args)
220 return None
221
[end of awscli/formatter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/formatter.py b/awscli/formatter.py
--- a/awscli/formatter.py
+++ b/awscli/formatter.py
@@ -140,7 +140,7 @@
indent_level=indent_level + 1)
def _build_sub_table_from_list(self, current, indent_level, title):
- headers, more = self._group_scalar_keys(current[0])
+ headers, more = self._group_scalar_keys_from_list(current)
self.table.add_row_header(headers)
first = True
for element in current:
@@ -149,7 +149,9 @@
indent_level=indent_level)
self.table.add_row_header(headers)
first = False
- self.table.add_row([element[header] for header in headers])
+ # Use .get() to account for the fact that sometimes an element
+ # may not have all the keys from the header.
+ self.table.add_row([element.get(header, '') for header in headers])
for remaining in more:
# Some of the non scalar attributes may not necessarily
# be in every single element of the list, so we need to
@@ -161,6 +163,20 @@
def _scalar_type(self, element):
return not isinstance(element, (list, dict))
+ def _group_scalar_keys_from_list(self, list_of_dicts):
+ # We want to make sure we catch all the keys in the list of dicts.
+ # Most of the time each list element has the same keys, but sometimes
+ # a list element will have keys not defined in other elements.
+ headers = set()
+ more = set()
+ for item in list_of_dicts:
+ current_headers, current_more = self._group_scalar_keys(item)
+ headers.update(current_headers)
+ more.update(current_more)
+ headers = list(sorted(headers))
+ more = list(sorted(more))
+ return headers, more
+
def _group_scalar_keys(self, current):
# Given a dict, separate the keys into those whose values are
# scalar, and those whose values aren't. Return two lists,
| {"golden_diff": "diff --git a/awscli/formatter.py b/awscli/formatter.py\n--- a/awscli/formatter.py\n+++ b/awscli/formatter.py\n@@ -140,7 +140,7 @@\n indent_level=indent_level + 1)\n \n def _build_sub_table_from_list(self, current, indent_level, title):\n- headers, more = self._group_scalar_keys(current[0])\n+ headers, more = self._group_scalar_keys_from_list(current)\n self.table.add_row_header(headers)\n first = True\n for element in current:\n@@ -149,7 +149,9 @@\n indent_level=indent_level)\n self.table.add_row_header(headers)\n first = False\n- self.table.add_row([element[header] for header in headers])\n+ # Use .get() to account for the fact that sometimes an element\n+ # may not have all the keys from the header.\n+ self.table.add_row([element.get(header, '') for header in headers])\n for remaining in more:\n # Some of the non scalar attributes may not necessarily\n # be in every single element of the list, so we need to\n@@ -161,6 +163,20 @@\n def _scalar_type(self, element):\n return not isinstance(element, (list, dict))\n \n+ def _group_scalar_keys_from_list(self, list_of_dicts):\n+ # We want to make sure we catch all the keys in the list of dicts.\n+ # Most of the time each list element has the same keys, but sometimes\n+ # a list element will have keys not defined in other elements.\n+ headers = set()\n+ more = set()\n+ for item in list_of_dicts:\n+ current_headers, current_more = self._group_scalar_keys(item)\n+ headers.update(current_headers)\n+ more.update(current_more)\n+ headers = list(sorted(headers))\n+ more = list(sorted(more))\n+ return headers, more\n+\n def _group_scalar_keys(self, current):\n # Given a dict, separate the keys into those whose values are\n # scalar, and those whose values aren't. Return two lists,\n", "issue": "cloudformation describe-stack-events --output table omits ResourceStatusReason\naws-cli/0.13.2 Python/2.7.5 Darwin/12.4.1\n\nThis may be an RFE. With a command like `aws cloudformation describe-stack-events --output table ...` the table output does not include the `ResourceStatusReason` data. This is useful data when failures occur. For example here is some sample output with an interesting `ResourceStatusReason`.\n\n```\n{\n \"StackId\": \"arn:aws:cloudformation:us-west-2:317324027142:stack/spot-3x-m1large/e1fa9ac0-f985-11e2-aa7f-507bfc8736d2\", \n \"EventId\": \"LaunchConfig1-CREATE_FAILED-1375236694000\", \n \"ResourceStatus\": \"CREATE_FAILED\", \n \"ResourceType\": \"AWS::AutoScaling::LaunchConfiguration\", \n \"Timestamp\": \"2013-07-31T02:11:34Z\", \n \"ResourceStatusReason\": \"AMI cannot be described\", \n \"StackName\": \"spot-3x-m1large\", \n \"PhysicalResourceId\": \"spot-3x-m1large-LaunchConfig1-EFTX7ICLP050\", \n \"LogicalResourceId\": \"LaunchConfig1\"\n}\n```\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n\n# http://aws.amazon.com/apache2.0/\n\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport logging\nimport sys\nimport json\n\nimport six\n\nfrom awscli.table import MultiTable, Styler, ColorizedStyler\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass Formatter(object):\n def __init__(self, args):\n self._args = args\n\n def _remove_request_id(self, response_data):\n # We only want to display the ResponseMetadata (which includes\n # the request id) if there is an error in the response.\n # Since all errors have been unified under the Errors key,\n # this should be a reasonable way to filter.\n if 'Errors' not in response_data:\n if 'ResponseMetadata' in response_data:\n if 'RequestId' in response_data['ResponseMetadata']:\n request_id = response_data['ResponseMetadata']['RequestId']\n LOG.debug('RequestId: %s', request_id)\n del response_data['ResponseMetadata']\n\n\nclass FullyBufferedFormatter(Formatter):\n def __call__(self, operation, response, stream=None):\n if stream is None:\n # Retrieve stdout on invocation instead of at import time\n # so that if anything wraps stdout we'll pick up those changes\n # (specifically colorama on windows wraps stdout).\n stream = sys.stdout\n # I think the interfaces between non-paginated\n # and paginated responses can still be cleaned up.\n if operation.can_paginate and self._args.paginate:\n response_data = response.build_full_result()\n else:\n response_data = response\n try:\n self._remove_request_id(response_data)\n self._format_response(operation, response_data, stream)\n finally:\n # flush is needed to avoid the \"close failed in file object\n # destructor\" in python2.x (see http://bugs.python.org/issue11380).\n stream.flush()\n\n\nclass JSONFormatter(FullyBufferedFormatter):\n\n def _format_response(self, operation, response, stream):\n # For operations that have no response body (e.g. s3 put-object)\n # the response will be an empty string. We don't want to print\n # that out to the user but other \"falsey\" values like an empty\n # dictionary should be printed.\n if response != '':\n json.dump(response, stream, indent=4)\n stream.write('\\n')\n\n\nclass TableFormatter(FullyBufferedFormatter):\n \"\"\"Pretty print a table from a given response.\n\n The table formatter is able to take any generic response\n and generate a pretty printed table. 
It does this without\n using the output definition from the model.\n\n \"\"\"\n def __init__(self, args, table=None):\n super(TableFormatter, self).__init__(args)\n if args.color == 'auto':\n self.table = MultiTable(initial_section=False,\n column_separator='|')\n elif args.color == 'off':\n styler = Styler()\n self.table = MultiTable(initial_section=False,\n column_separator='|', styler=styler)\n elif args.color == 'on':\n styler = ColorizedStyler()\n self.table = MultiTable(initial_section=False,\n column_separator='|', styler=styler)\n else:\n raise ValueError(\"Unknown color option: %s\" % args.color)\n\n def _format_response(self, operation, response, stream):\n if self._build_table(operation.name, response):\n try:\n self.table.render(stream)\n except IOError:\n # If they're piping stdout to another process which exits before\n # we're done writing all of our output, we'll get an error about a\n # closed pipe which we can safely ignore.\n pass\n\n def _build_table(self, title, current, indent_level=0):\n if not current:\n return False\n self.table.new_section(title, indent_level=indent_level)\n if isinstance(current, list):\n if isinstance(current[0], dict):\n self._build_sub_table_from_list(current, indent_level, title)\n else:\n for item in current:\n self.table.add_row([item])\n if isinstance(current, dict):\n # Render a single row section with keys as header\n # and the row as the values, unless the value\n # is a list.\n self._build_sub_table_from_dict(current, indent_level)\n return True\n\n def _build_sub_table_from_dict(self, current, indent_level):\n # Render a single row section with keys as header\n # and the row as the values, unless the value\n # is a list.\n headers, more = self._group_scalar_keys(current)\n if len(headers) == 1:\n # Special casing if a dict has a single scalar key/value pair.\n self.table.add_row([headers[0], current[headers[0]]])\n elif headers:\n self.table.add_row_header(headers)\n self.table.add_row([current[k] for k in headers])\n for remaining in more:\n self._build_table(remaining, current[remaining],\n indent_level=indent_level + 1)\n\n def _build_sub_table_from_list(self, current, indent_level, title):\n headers, more = self._group_scalar_keys(current[0])\n self.table.add_row_header(headers)\n first = True\n for element in current:\n if not first and more:\n self.table.new_section(title,\n indent_level=indent_level)\n self.table.add_row_header(headers)\n first = False\n self.table.add_row([element[header] for header in headers])\n for remaining in more:\n # Some of the non scalar attributes may not necessarily\n # be in every single element of the list, so we need to\n # check this condition before recursing.\n if remaining in element:\n self._build_table(remaining, element[remaining],\n indent_level=indent_level + 1)\n\n def _scalar_type(self, element):\n return not isinstance(element, (list, dict))\n\n def _group_scalar_keys(self, current):\n # Given a dict, separate the keys into those whose values are\n # scalar, and those whose values aren't. 
Return two lists,\n # one is the scalar value keys, the second is the remaining keys.\n more = []\n headers = []\n for element in current:\n if self._scalar_type(current[element]):\n headers.append(element)\n else:\n more.append(element)\n headers.sort()\n more.sort()\n return headers, more\n\n\nclass TextFormatter(FullyBufferedFormatter):\n\n def _output(self, data, stream, label=None):\n \"\"\"\n A very simple, very stupid text formatter that has no\n knowledge of the output as defined in the JSON model.\n \"\"\"\n if isinstance(data, dict):\n scalars = []\n non_scalars = []\n for key, val in data.items():\n if isinstance(val, dict):\n non_scalars.append((key, val))\n elif isinstance(val, list):\n non_scalars.append((key, val))\n elif not isinstance(val, six.string_types):\n scalars.append(str(val))\n else:\n scalars.append(val)\n if label:\n scalars.insert(0, label.upper())\n stream.write('\\t'.join(scalars))\n stream.write('\\n')\n for label, non_scalar in non_scalars:\n self._output(non_scalar, stream, label)\n elif isinstance(data, list):\n for d in data:\n self._output(d, stream)\n\n def _format_response(self, operation, response, stream):\n self._output(response, stream)\n\n\ndef get_formatter(format_type, args):\n if format_type == 'json':\n return JSONFormatter(args)\n elif format_type == 'text':\n return TextFormatter(args)\n elif format_type == 'table':\n return TableFormatter(args)\n return None\n", "path": "awscli/formatter.py"}]} | 3,229 | 473 |
gh_patches_debug_28745 | rasdani/github-patches | git_diff | SeldonIO__MLServer-531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow configuring uvicorn logging
While trying to add custom handlers to the application logger, we have found that the uvicorn ones are not working as expected. It seems that uvicorn overrides the logging configuration when the Config object is initialized.
It would be nice to allow users to configure the uvicorn logger. This can be done by setting the path to a logging configuration file when creating uvicorn's Config object.
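As a rough sketch (not MLServer's current API; the file name is arbitrary), uvicorn already exposes this through the `log_config` argument of `uvicorn.Config`, which accepts either a dict or a path to a logging configuration file, and skips its default logging setup entirely when given `None`:

```python
import uvicorn

async def app(scope, receive, send):
    ...  # placeholder ASGI app; MLServer would pass its own app here

# Passing `log_config` keeps uvicorn from replacing the logging configuration
# with its defaults when the Config object is initialized.
config = uvicorn.Config(app, host="0.0.0.0", port=8080, log_config="./logging.conf")
server = uvicorn.Server(config)
```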
</issue>
<code>
[start of mlserver/logging.py]
1 import logging
2 import sys
3
4 from logging import Formatter, StreamHandler
5
6 from .settings import Settings
7
8 LoggerName = "mlserver"
9 LoggerFormat = "%(asctime)s [%(name)s] %(levelname)s - %(message)s"
10
11 logger = logging.getLogger(LoggerName)
12
13
14 def get_logger():
15 return logger
16
17
18 def configure_logger(settings: Settings = None):
19 logger = get_logger()
20
21 # Don't add handler twice
22 if not logger.handlers:
23 stream_handler = StreamHandler(sys.stdout)
24 formatter = Formatter(LoggerFormat)
25 stream_handler.setFormatter(formatter)
26
27 logger.addHandler(stream_handler)
28
29 logger.setLevel(logging.INFO)
30 if settings and settings.debug:
31 logger.setLevel(logging.DEBUG)
32
33 return logger
34
[end of mlserver/logging.py]
[start of mlserver/rest/server.py]
1 import uvicorn
2
3 from ..settings import Settings
4 from ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers
5 from ..model import MLModel
6
7 from .utils import matches
8 from .app import create_app
9
10
11 class _NoSignalServer(uvicorn.Server):
12 def install_signal_handlers(self):
13 pass
14
15
16 class RESTServer:
17 def __init__(
18 self,
19 settings: Settings,
20 data_plane: DataPlane,
21 model_repository_handlers: ModelRepositoryHandlers,
22 ):
23 self._settings = settings
24 self._data_plane = data_plane
25 self._model_repository_handlers = model_repository_handlers
26 self._app = create_app(
27 self._settings,
28 data_plane=self._data_plane,
29 model_repository_handlers=self._model_repository_handlers,
30 )
31
32 async def add_custom_handlers(self, model: MLModel):
33 handlers = get_custom_handlers(model)
34 for custom_handler, handler_method in handlers:
35 self._app.add_api_route(
36 custom_handler.rest_path,
37 handler_method,
38 methods=[custom_handler.rest_method],
39 )
40
41 async def delete_custom_handlers(self, model: MLModel):
42 handlers = get_custom_handlers(model)
43 if len(handlers) == 0:
44 return
45
46 # NOTE: Loop in reverse, so that it's quicker to find all the recently
47 # added routes and we can remove routes on-the-fly
48 for i, route in reversed(list(enumerate(self._app.routes))):
49 for j, (custom_handler, handler_method) in enumerate(handlers):
50 if matches(route, custom_handler, handler_method): # type: ignore
51 self._app.routes.pop(i)
52 handlers.pop(j)
53
54 async def start(self):
55 cfg = uvicorn.Config(
56 self._app, host=self._settings.host, port=self._settings.http_port
57 )
58 self._server = _NoSignalServer(cfg)
59 await self._server.serve()
60
61 async def stop(self):
62 self._server.handle_exit(sig=None, frame=None)
63
[end of mlserver/rest/server.py]
[start of mlserver/settings.py]
1 from typing import List, Optional
2 from pydantic import BaseSettings, PyObject
3
4 from .version import __version__
5 from .types import MetadataTensor
6
7 ENV_PREFIX_SETTINGS = "MLSERVER_"
8 ENV_PREFIX_MODEL_SETTINGS = "MLSERVER_MODEL_"
9
10
11 class CORSSettings(BaseSettings):
12 class Config:
13 env_prefix = ENV_PREFIX_SETTINGS
14
15 allow_origins: Optional[List[str]] = []
16 """
17 A list of origins that should be permitted to make
18 cross-origin requests. E.g. ['https://example.org', 'https://www.example.org'].
19 You can use ['*'] to allow any origin
20 """
21
22 allow_origin_regex: Optional[str] = None
23 """
24 A regex string to match against origins that
25 should be permitted to make cross-origin requests.
26 e.g. 'https:\\/\\/.*\\.example\\.org'
27 """
28
29 allow_credentials: Optional[bool] = False
30 """Indicate that cookies should be supported for cross-origin requests"""
31
32 allow_methods: Optional[List[str]] = ["GET"]
33 """A list of HTTP methods that should be allowed for cross-origin requests"""
34
35 allow_headers: Optional[List[str]] = []
36 """A list of HTTP request headers that should be supported for
37 cross-origin requests"""
38
39 expose_headers: Optional[List[str]] = []
40 """Indicate any response headers that should be made accessible to the browser"""
41
42 max_age: Optional[int] = 600
43 """Sets a maximum time in seconds for browsers to cache CORS responses"""
44
45
46 class Settings(BaseSettings):
47 class Config:
48 env_prefix = ENV_PREFIX_SETTINGS
49
50 debug: bool = True
51
52 # Model repository settings
53 model_repository_root: str = "."
54 """Root of the model repository, where we will search for models."""
55
56 load_models_at_startup: bool = True
57 """Flag to load all available models automatically at startup."""
58
59 # Server metadata
60 server_name: str = "mlserver"
61 """Name of the server."""
62
63 server_version: str = __version__
64 """Version of the server."""
65
66 extensions: List[str] = []
67 """Server extensions loaded."""
68
69 # Server settings
70 host: str = "0.0.0.0"
71 """Host where to listen for connections."""
72
73 http_port: int = 8080
74 """Port where to listen for HTTP / REST connections."""
75
76 grpc_port: int = 8081
77 """Port where to listen for gRPC connections."""
78
79 grpc_max_message_length: Optional[int] = None
80 """Maximum length (i.e. size) of gRPC payloads."""
81
82 # CORS settings
83 cors_settings: Optional[CORSSettings] = None
84
85 # Metrics settings
86 metrics_endpoint: Optional[str] = "/metrics"
87 """
88 Endpoint used to expose Prometheus metrics. Alternatively, can be set to
89 `None` to disable it
90 """
91
92
93 class ModelParameters(BaseSettings):
94 """
95 Parameters that apply only to a particular instance of a model.
96 This can include things like model weights, or arbitrary ``extra``
97 parameters particular to the underlying inference runtime.
98 The main difference with respect to ``ModelSettings`` is that parameters
99 can change on each instance (e.g. each version) of the model.
100 """
101
102 class Config:
103 env_prefix = ENV_PREFIX_MODEL_SETTINGS
104
105 uri: Optional[str] = None
106 """
107 URI where the model artifacts can be found.
108 This path must be either absolute or relative to where MLServer is running.
109 """
110
111 version: Optional[str] = None
112 """Version of the model."""
113
114 format: Optional[str] = None
115 """Format of the model (only available on certain runtimes)."""
116
117 content_type: Optional[str] = None
118 """Default content type to use for requests and responses."""
119
120 extra: Optional[dict] = {}
121 """Arbitrary settings, dependent on the inference runtime
122 implementation."""
123
124
125 class ModelSettings(BaseSettings):
126 class Config:
127 env_prefix = ENV_PREFIX_MODEL_SETTINGS
128 underscore_attrs_are_private = True
129
130 # Source points to the file where model settings were loaded from
131 _source: Optional[str] = None
132
133 name: str = ""
134 """Name of the model."""
135
136 # Model metadata
137 platform: str = ""
138 """Framework used to train and serialise the model (e.g. sklearn)."""
139
140 versions: List[str] = []
141 """Versions of dependencies used to train the model (e.g.
142 sklearn/0.20.1)."""
143
144 inputs: List[MetadataTensor] = []
145 """Metadata about the inputs accepted by the model."""
146
147 outputs: List[MetadataTensor] = []
148 """Metadata about the outputs returned by the model."""
149
150 # Parallel settings
151 parallel_workers: int = 4
152 """When parallel inference is enabled, number of workers to run inference
153 across."""
154
155 warm_workers: bool = False
156 """When parallel inference is enabled, optionally load model to all workers
157 on startup"""
158
159 # Adaptive Batching settings (disabled by default)
160 max_batch_size: int = 0
161 """When adaptive batching is enabled, maximum number of requests to group
162 together in a single batch."""
163
164 max_batch_time: float = 0.0
165 """When adaptive batching is enabled, maximum amount of time (in seconds)
166 to wait for enough requests to build a full batch."""
167
168 # Custom model class implementation
169 implementation: PyObject = "mlserver.model.MLModel" # type: ignore
170 """*Python path* to the inference runtime to use to serve this model (e.g.
171 ``mlserver_sklearn.SKLearnModel``)."""
172
173 # Model parameters are meant to be set directly by the MLServer runtime.
174 # However, it's also possible to override them manually.
175 parameters: Optional[ModelParameters] = None
176 """Extra parameters for each instance of this model."""
177
[end of mlserver/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/logging.py b/mlserver/logging.py
--- a/mlserver/logging.py
+++ b/mlserver/logging.py
@@ -2,6 +2,7 @@
import sys
from logging import Formatter, StreamHandler
+import logging.config
from .settings import Settings
@@ -15,6 +16,10 @@
return logger
+def apply_logging_file(logging_settings: str):
+ logging.config.fileConfig(fname=logging_settings, disable_existing_loggers=False)
+
+
def configure_logger(settings: Settings = None):
logger = get_logger()
@@ -30,4 +35,7 @@
if settings and settings.debug:
logger.setLevel(logging.DEBUG)
+ if settings and settings.logging_settings:
+ apply_logging_file(settings.logging_settings)
+
return logger
diff --git a/mlserver/rest/server.py b/mlserver/rest/server.py
--- a/mlserver/rest/server.py
+++ b/mlserver/rest/server.py
@@ -53,7 +53,10 @@
async def start(self):
cfg = uvicorn.Config(
- self._app, host=self._settings.host, port=self._settings.http_port
+ self._app,
+ host=self._settings.host,
+ port=self._settings.http_port,
+ log_config=self._settings.logging_settings,
)
self._server = _NoSignalServer(cfg)
await self._server.serve()
diff --git a/mlserver/settings.py b/mlserver/settings.py
--- a/mlserver/settings.py
+++ b/mlserver/settings.py
@@ -89,6 +89,10 @@
`None` to disable it
"""
+ # Logging settings
+ logging_settings: Optional[str] = None
+ """Path to logging config file"""
+
class ModelParameters(BaseSettings):
"""
| {"golden_diff": "diff --git a/mlserver/logging.py b/mlserver/logging.py\n--- a/mlserver/logging.py\n+++ b/mlserver/logging.py\n@@ -2,6 +2,7 @@\n import sys\n \n from logging import Formatter, StreamHandler\n+import logging.config\n \n from .settings import Settings\n \n@@ -15,6 +16,10 @@\n return logger\n \n \n+def apply_logging_file(logging_settings: str):\n+ logging.config.fileConfig(fname=logging_settings, disable_existing_loggers=False)\n+\n+\n def configure_logger(settings: Settings = None):\n logger = get_logger()\n \n@@ -30,4 +35,7 @@\n if settings and settings.debug:\n logger.setLevel(logging.DEBUG)\n \n+ if settings and settings.logging_settings:\n+ apply_logging_file(settings.logging_settings)\n+\n return logger\ndiff --git a/mlserver/rest/server.py b/mlserver/rest/server.py\n--- a/mlserver/rest/server.py\n+++ b/mlserver/rest/server.py\n@@ -53,7 +53,10 @@\n \n async def start(self):\n cfg = uvicorn.Config(\n- self._app, host=self._settings.host, port=self._settings.http_port\n+ self._app,\n+ host=self._settings.host,\n+ port=self._settings.http_port,\n+ log_config=self._settings.logging_settings,\n )\n self._server = _NoSignalServer(cfg)\n await self._server.serve()\ndiff --git a/mlserver/settings.py b/mlserver/settings.py\n--- a/mlserver/settings.py\n+++ b/mlserver/settings.py\n@@ -89,6 +89,10 @@\n `None` to disable it\n \"\"\"\n \n+ # Logging settings\n+ logging_settings: Optional[str] = None\n+ \"\"\"Path to logging config file\"\"\"\n+\n \n class ModelParameters(BaseSettings):\n \"\"\"\n", "issue": "Allow to configure uvicorn logging\nTrying to add custom handlers to the application logger we have found that the uvicorn ones are not working as expected. It seems that uvicorn overrides the logging configuration when the Config object is initialized.\r\n\r\nIt would be nice to allow users to configure the uvicorn logger. 
This can be done by setting the path to a logging configuration file when creating uvicorn's Config object.\n", "before_files": [{"content": "import logging\nimport sys\n\nfrom logging import Formatter, StreamHandler\n\nfrom .settings import Settings\n\nLoggerName = \"mlserver\"\nLoggerFormat = \"%(asctime)s [%(name)s] %(levelname)s - %(message)s\"\n\nlogger = logging.getLogger(LoggerName)\n\n\ndef get_logger():\n return logger\n\n\ndef configure_logger(settings: Settings = None):\n logger = get_logger()\n\n # Don't add handler twice\n if not logger.handlers:\n stream_handler = StreamHandler(sys.stdout)\n formatter = Formatter(LoggerFormat)\n stream_handler.setFormatter(formatter)\n\n logger.addHandler(stream_handler)\n\n logger.setLevel(logging.INFO)\n if settings and settings.debug:\n logger.setLevel(logging.DEBUG)\n\n return logger\n", "path": "mlserver/logging.py"}, {"content": "import uvicorn\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers\nfrom ..model import MLModel\n\nfrom .utils import matches\nfrom .app import create_app\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass RESTServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n self._app = create_app(\n self._settings,\n data_plane=self._data_plane,\n model_repository_handlers=self._model_repository_handlers,\n )\n\n async def add_custom_handlers(self, model: MLModel):\n handlers = get_custom_handlers(model)\n for custom_handler, handler_method in handlers:\n self._app.add_api_route(\n custom_handler.rest_path,\n handler_method,\n methods=[custom_handler.rest_method],\n )\n\n async def delete_custom_handlers(self, model: MLModel):\n handlers = get_custom_handlers(model)\n if len(handlers) == 0:\n return\n\n # NOTE: Loop in reverse, so that it's quicker to find all the recently\n # added routes and we can remove routes on-the-fly\n for i, route in reversed(list(enumerate(self._app.routes))):\n for j, (custom_handler, handler_method) in enumerate(handlers):\n if matches(route, custom_handler, handler_method): # type: ignore\n self._app.routes.pop(i)\n handlers.pop(j)\n\n async def start(self):\n cfg = uvicorn.Config(\n self._app, host=self._settings.host, port=self._settings.http_port\n )\n self._server = _NoSignalServer(cfg)\n await self._server.serve()\n\n async def stop(self):\n self._server.handle_exit(sig=None, frame=None)\n", "path": "mlserver/rest/server.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass CORSSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n allow_origins: Optional[List[str]] = []\n \"\"\"\n A list of origins that should be permitted to make\n cross-origin requests. E.g. ['https://example.org', 'https://www.example.org'].\n You can use ['*'] to allow any origin\n \"\"\"\n\n allow_origin_regex: Optional[str] = None\n \"\"\"\n A regex string to match against origins that\n should be permitted to make cross-origin requests.\n e.g. 
'https:\\\\/\\\\/.*\\\\.example\\\\.org'\n \"\"\"\n\n allow_credentials: Optional[bool] = False\n \"\"\"Indicate that cookies should be supported for cross-origin requests\"\"\"\n\n allow_methods: Optional[List[str]] = [\"GET\"]\n \"\"\"A list of HTTP methods that should be allowed for cross-origin requests\"\"\"\n\n allow_headers: Optional[List[str]] = []\n \"\"\"A list of HTTP request headers that should be supported for\n cross-origin requests\"\"\"\n\n expose_headers: Optional[List[str]] = []\n \"\"\"Indicate any response headers that should be made accessible to the browser\"\"\"\n\n max_age: Optional[int] = 600\n \"\"\"Sets a maximum time in seconds for browsers to cache CORS responses\"\"\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository settings\n model_repository_root: str = \".\"\n \"\"\"Root of the model repository, where we will search for models.\"\"\"\n\n load_models_at_startup: bool = True\n \"\"\"Flag to load all available models automatically at startup.\"\"\"\n\n # Server metadata\n server_name: str = \"mlserver\"\n \"\"\"Name of the server.\"\"\"\n\n server_version: str = __version__\n \"\"\"Version of the server.\"\"\"\n\n extensions: List[str] = []\n \"\"\"Server extensions loaded.\"\"\"\n\n # Server settings\n host: str = \"0.0.0.0\"\n \"\"\"Host where to listen for connections.\"\"\"\n\n http_port: int = 8080\n \"\"\"Port where to listen for HTTP / REST connections.\"\"\"\n\n grpc_port: int = 8081\n \"\"\"Port where to listen for gRPC connections.\"\"\"\n\n grpc_max_message_length: Optional[int] = None\n \"\"\"Maximum length (i.e. size) of gRPC payloads.\"\"\"\n\n # CORS settings\n cors_settings: Optional[CORSSettings] = None\n\n # Metrics settings\n metrics_endpoint: Optional[str] = \"/metrics\"\n \"\"\"\n Endpoint used to expose Prometheus metrics. Alternatively, can be set to\n `None` to disable it\n \"\"\"\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can include things like model weights, or arbitrary ``extra``\n parameters particular to the underlying inference runtime.\n The main difference with respect to ``ModelSettings`` is that parameters\n can change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n \"\"\"\n URI where the model artifacts can be found.\n This path must be either absolute or relative to where MLServer is running.\n \"\"\"\n\n version: Optional[str] = None\n \"\"\"Version of the model.\"\"\"\n\n format: Optional[str] = None\n \"\"\"Format of the model (only available on certain runtimes).\"\"\"\n\n content_type: Optional[str] = None\n \"\"\"Default content type to use for requests and responses.\"\"\"\n\n extra: Optional[dict] = {}\n \"\"\"Arbitrary settings, dependent on the inference runtime\n implementation.\"\"\"\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n underscore_attrs_are_private = True\n\n # Source points to the file where model settings were loaded from\n _source: Optional[str] = None\n\n name: str = \"\"\n \"\"\"Name of the model.\"\"\"\n\n # Model metadata\n platform: str = \"\"\n \"\"\"Framework used to train and serialise the model (e.g. 
sklearn).\"\"\"\n\n versions: List[str] = []\n \"\"\"Versions of dependencies used to train the model (e.g.\n sklearn/0.20.1).\"\"\"\n\n inputs: List[MetadataTensor] = []\n \"\"\"Metadata about the inputs accepted by the model.\"\"\"\n\n outputs: List[MetadataTensor] = []\n \"\"\"Metadata about the outputs returned by the model.\"\"\"\n\n # Parallel settings\n parallel_workers: int = 4\n \"\"\"When parallel inference is enabled, number of workers to run inference\n across.\"\"\"\n\n warm_workers: bool = False\n \"\"\"When parallel inference is enabled, optionally load model to all workers\n on startup\"\"\"\n\n # Adaptive Batching settings (disabled by default)\n max_batch_size: int = 0\n \"\"\"When adaptive batching is enabled, maximum number of requests to group\n together in a single batch.\"\"\"\n\n max_batch_time: float = 0.0\n \"\"\"When adaptive batching is enabled, maximum amount of time (in seconds)\n to wait for enough requests to build a full batch.\"\"\"\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n \"\"\"*Python path* to the inference runtime to use to serve this model (e.g.\n ``mlserver_sklearn.SKLearnModel``).\"\"\"\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n \"\"\"Extra parameters for each instance of this model.\"\"\"\n", "path": "mlserver/settings.py"}]} | 3,117 | 391 |
gh_patches_debug_652 | rasdani/github-patches | git_diff | pex-tool__pex-2086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.127
On the docket:
+ [x] Pex fails to subset a "foo @ file:///bar" URL lock. #2083
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.126"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.126"
+__version__ = "2.1.127"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.126\"\n+__version__ = \"2.1.127\"\n", "issue": "Release 2.1.127\nOn the docket:\r\n+ [x] Pex fails to subset a \"foo @ file:///bar\" URL lock. #2083\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.126\"\n", "path": "pex/version.py"}]} | 624 | 98 |